diff --git a/src/bootstrap/bin/rustc.rs b/src/bootstrap/bin/rustc.rs index 7a765973e20f8..34edca4cdeda1 100644 --- a/src/bootstrap/bin/rustc.rs +++ b/src/bootstrap/bin/rustc.rs @@ -149,7 +149,7 @@ fn main() { // // `compiler_builtins` are unconditionally compiled with panic=abort to // workaround undefined references to `rust_eh_unwind_resume` generated - // otherwise, see issue https://github.com/rust-lang/rust/issues/43095. + // otherwise, see issue #43095. if crate_name == "panic_abort" || crate_name == "compiler_builtins" && stage != "0" { cmd.arg("-C").arg("panic=abort"); diff --git a/src/bootstrap/builder.rs b/src/bootstrap/builder.rs index 9d037dad9ccbd..04c95b04e01ce 100644 --- a/src/bootstrap/builder.rs +++ b/src/bootstrap/builder.rs @@ -80,7 +80,7 @@ pub trait Step: 'static + Clone + Debug + PartialEq + Eq + Hash { // It is reasonable to not have an implementation of make_run for rules // who do not want to get called from the root context. This means that // they are likely dependencies (e.g., sysroot creation) or similar, and - // as such calling them from ./x.py isn't logical. + // as such calling them from `./x.py` isn't logical. unimplemented!() } } @@ -236,11 +236,11 @@ impl StepDescription { #[derive(Clone)] pub struct ShouldRun<'a> { pub builder: &'a Builder<'a>, - // use a BTreeSet to maintain sort order + // Use a `BTreeSet` to maintain sort order. paths: BTreeSet<PathSet>, // If this is a default rule, this is an additional constraint placed on - // its run. Generally something like compiler docs being enabled. + // its run (generally something like compiler docs being enabled). is_really_default: bool, } @@ -249,7 +249,8 @@ impl<'a> ShouldRun<'a> { ShouldRun { builder, paths: BTreeSet::new(), - is_really_default: true, // by default no additional conditions + // By default, no additional conditions. + is_really_default: true, } } @@ -277,12 +278,12 @@ impl<'a> ShouldRun<'a> { self } - // single, non-aliased path + // Single, non-aliased path. pub fn path(self, path: &str) -> Self { self.paths(&[path]) } - // multiple aliases for the same job + // Multiple aliases for the same job. pub fn paths(mut self, paths: &[&str]) -> Self { self.paths .insert(PathSet::Set(paths.iter().map(PathBuf::from).collect())); @@ -301,7 +302,7 @@ impl<'a> ShouldRun<'a> { self } - // allows being more explicit about why should_run in Step returns the value passed to it + // Allows being more explicit about why `Step::should_run` returns the value passed to it. pub fn never(mut self) -> ShouldRun<'a> { self.paths.insert(PathSet::empty()); self } @@ -677,7 +678,7 @@ impl<'a> Builder<'a> { let compiler = self.compiler(self.top_stage, host); cmd.env("RUSTC_STAGE", compiler.stage.to_string()) .env("RUSTC_SYSROOT", self.sysroot(compiler)) - // Note that this is *not* the sysroot_libdir because rustdoc must be linked + // Note that this is *not* the `sysroot_libdir` because rustdoc must be linked // equivalently to rustc. .env("RUSTDOC_LIBDIR", self.rustc_libdir(compiler)) .env("CFG_RELEASE_CHANNEL", &self.config.channel) @@ -813,12 +814,12 @@ impl<'a> Builder<'a> { } cargo.arg("-j").arg(self.jobs().to_string()); - // Remove make-related flags to ensure Cargo can correctly set things up + // Remove make-related flags to ensure Cargo can correctly set things up.
cargo.env_remove("MAKEFLAGS"); cargo.env_remove("MFLAGS"); - // FIXME: Temporary fix for https://github.com/rust-lang/cargo/issues/3005 - // Force cargo to output binaries with disambiguating hashes in the name + // FIXME: temporary fix for rust-lang/cargo#3005. + // Force cargo to output binaries with disambiguating hashes in the name. let metadata = if compiler.stage == 0 { // Treat stage0 like special channel, whether it's a normal prior- // release rustc or a local rebuild with the same version, so we @@ -863,7 +864,7 @@ impl<'a> Builder<'a> { // "raw" compiler in that it's the exact snapshot we download. Normally // the stage0 build means it uses libraries build by the stage0 // compiler, but for tools we just use the precompiled libraries that - // we've downloaded + // we've downloaded. let use_snapshot = mode == Mode::ToolBootstrap; assert!(!use_snapshot || stage == 0 || self.local_rebuild); @@ -920,7 +921,7 @@ impl<'a> Builder<'a> { if mode.is_tool() { // Tools like cargo and rls don't get debuginfo by default right now, but this can be - // enabled in the config. Adding debuginfo makes them several times larger. + // enabled in the config. Adding debuginfo makes them several times larger. if self.config.rust_debuginfo_tools { cargo.env("RUSTC_DEBUGINFO", self.config.rust_debuginfo.to_string()); cargo.env( @@ -1028,7 +1029,7 @@ impl<'a> Builder<'a> { // Build scripts use either the `cc` crate or `configure/make` so we pass // the options through environment variables that are fetched and understood by both. // - // FIXME: the guard against msvc shouldn't need to be here + // FIXME: the guard against MSVC shouldn't need to be here. if target.contains("msvc") { if let Some(ref cl) = self.config.llvm_clang_cl { cargo.env("CC", cl).env("CXX", cl); @@ -1040,7 +1041,7 @@ impl<'a> Builder<'a> { Some(ref s) => s, None => return s.display().to_string(), }; - // FIXME: the cc-rs crate only recognizes the literal strings + // FIXME: the `cc-rs` crate only recognizes the literal strings // `ccache` and `sccache` when doing caching compilations, so we // mirror that here. It should probably be fixed upstream to // accept a new env var or otherwise work with custom ccache @@ -1080,12 +1081,12 @@ impl<'a> Builder<'a> { cargo.env("RUSTC_SAVE_ANALYSIS", "api".to_string()); } - // For `cargo doc` invocations, make rustdoc print the Rust version into the docs + // For `cargo doc` invocations, make rustdoc print the Rust version into the docs. cargo.env("RUSTDOC_CRATE_VERSION", self.rust_version()); - // Environment variables *required* throughout the build + // Environment variables *required* throughout the build. // - // FIXME: should update code to not require this env var + // FIXME: should update code to not require this env var. cargo.env("CFG_COMPILER_HOST_TRIPLE", target); // Set this for all builds to make sure doc builds also get it. @@ -1476,7 +1477,7 @@ mod __test { #[test] fn dist_with_target_flag() { let mut config = configure(&["B"], &["C"]); - config.run_host_only = false; // as-if --target=C was passed + config.run_host_only = false; // as if `--target=C` were passed let build = Build::new(config); let mut builder = Builder::new(&build); builder.run_step_descriptions(&Builder::get_step_descriptions(Kind::Dist), &[]); diff --git a/src/bootstrap/cache.rs b/src/bootstrap/cache.rs index 5f84816789a68..ada8e871dfc14 100644 --- a/src/bootstrap/cache.rs +++ b/src/bootstrap/cache.rs @@ -235,7 +235,8 @@ lazy_static!
{ pub struct Cache( RefCell<HashMap< TypeId, - Box<Any>, // actually a HashMap<Step, Interned<Step::Output>> + // Actually a `HashMap<Step, Interned<Step::Output>>`. + Box<Any>, >> ); diff --git a/src/bootstrap/compile.rs b/src/bootstrap/compile.rs index 8fabb8c3fd08f..a75c0cc613751 100644 --- a/src/bootstrap/compile.rs +++ b/src/bootstrap/compile.rs @@ -165,12 +165,12 @@ pub fn std_cargo(builder: &Builder, let features = builder.std_features(); if compiler.stage != 0 && builder.config.sanitizers { - // This variable is used by the sanitizer runtime crates, e.g. - // rustc_lsan, to build the sanitizer runtime from C code + // This variable is used by the sanitizer runtime crates, e.g., + // `rustc_lsan`, to build the sanitizer runtime from C code. // When this variable is missing, those crates won't compile the C code, // so we don't set this variable during stage0 where llvm-config is // missing - // We also only build the runtimes when --enable-sanitizers (or its + // We also only build the runtimes when `--enable-sanitizers` (or its // config.toml equivalent) is used let llvm_config = builder.ensure(native::Llvm { target: builder.config.build, @@ -895,7 +895,7 @@ impl Step for Assemble { run.never() } - /// Prepare a new compiler from the artifacts in `stage` + /// Prepares a new compiler from the artifacts in `stage` /// /// This will assemble a compiler in `build/$host/stage$stage`. The compiler /// must have been previously produced by the `stage - 1` builder.build @@ -921,17 +921,17 @@ impl Step for Assemble { // produce some other architecture compiler we need to start from // `build` to get there. // - // FIXME: Perhaps we should download those libraries? - // It would make builds faster... + // FIXME: perhaps we should download those libraries? + // It would certainly make builds faster. // - // FIXME: It may be faster if we build just a stage 1 compiler and then - // use that to bootstrap this compiler forward. + // FIXME: it may be faster if we build just a stage 1 compiler and then + // use that to bootstrap this compiler forward. let build_compiler = builder.compiler(target_compiler.stage - 1, builder.config.build); // Build the libraries for this compiler to link to (i.e., the libraries // it uses at runtime). NOTE: Crates the target compiler compiles don't - // link to these. (FIXME: Is that correct? It seems to be correct most + // link to these. (FIXME: is that correct? It seems to be correct most // of the time but I think we do link to these for stage2/bin compilers // when not performing a full bootstrap). builder.ensure(Rustc { @@ -958,7 +958,7 @@ impl Step for Assemble { let host = target_compiler.host; builder.info(&format!("Assembling stage{} compiler ({})", stage, host)); - // Link in all dylibs to the libdir + // Link in all dylibs to the libdir. let sysroot = builder.sysroot(target_compiler); let sysroot_libdir = sysroot.join(libdir(&*host)); t!(fs::create_dir_all(&sysroot_libdir)); @@ -979,7 +979,7 @@ impl Step for Assemble { dist::maybe_install_llvm_dylib(builder, target_compiler.host, &sysroot); - // Link the compiler binary itself into place + // Link the compiler binary itself into place. let out_dir = builder.cargo_out(build_compiler, Mode::Rustc, host); let rustc = out_dir.join(exe("rustc_binary", &*host)); let bindir = sysroot.join("bin"); @@ -992,7 +992,7 @@ impl Step for Assemble { } } -/// Link some files into a rustc sysroot. +/// Links some files into a rustc sysroot. /// /// For a particular stage this will link the file listed in `stamp` into the /// `sysroot_dst` provided.
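// An illustrative aside (not code from this patch): the `Cache` tuple struct
// annotated above relies on type erasure. A minimal sketch of the same
// pattern, with hypothetical names, keyed by `TypeId` and recovered via
// `downcast_ref`:
use std::any::{Any, TypeId};
use std::collections::HashMap;

#[derive(Default)]
struct TypeMap(HashMap<TypeId, Box<dyn Any>>);

impl TypeMap {
    fn put<T: Any>(&mut self, value: T) {
        self.0.insert(TypeId::of::<T>(), Box::new(value));
    }
    fn get<T: Any>(&self) -> Option<&T> {
        // The key guarantees the boxed value really is a `T`.
        self.0.get(&TypeId::of::<T>()).and_then(|b| b.downcast_ref::<T>())
    }
}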
@@ -1013,16 +1013,16 @@ pub fn run_cargo(builder: &Builder, return Vec::new(); } - // `target_root_dir` looks like $dir/$target/release + // `target_root_dir` looks like `$dir/$target/release`. let target_root_dir = stamp.parent().unwrap(); - // `target_deps_dir` looks like $dir/$target/release/deps + // `target_deps_dir` looks like `$dir/$target/release/deps`. let target_deps_dir = target_root_dir.join("deps"); - // `host_root_dir` looks like $dir/release + // `host_root_dir` looks like `$dir/release`. let host_root_dir = target_root_dir.parent().unwrap() // chop off `release` .parent().unwrap() // chop off `$target` .join(target_root_dir.file_name().unwrap()); - // Spawn Cargo slurping up its JSON output. We'll start building up the + // Spawn Cargo, collecting its JSON output. We'll start building up the // `deps` array of all files it generated along with a `toplevel` array of // files we need to probe for later. let mut deps = Vec::new(); @@ -1033,7 +1033,7 @@ pub fn run_cargo(builder: &Builder, _ => return, }; for filename in filenames { - // Skip files like executables + // Skip files like executables. if !filename.ends_with(".rlib") && !filename.ends_with(".lib") && !is_dylib(&filename) && @@ -1056,7 +1056,7 @@ pub fn run_cargo(builder: &Builder, continue; } - // Otherwise this was a "top level artifact" which right now doesn't + // Otherwise, this was a "top level artifact" which right now doesn't // have a hash in the name, but there's a version of this file in // the `deps` folder which *does* have a hash in the name. That's // the one we'll want to we'll probe for it later. @@ -1080,7 +1080,7 @@ pub fn run_cargo(builder: &Builder, exit(1); } - // Ok now we need to actually find all the files listed in `toplevel`. We've + // Now we need to actually find all the files listed in `toplevel`. We've // got a list of prefix/extensions and we basically just need to find the // most recent file in the `deps` folder corresponding to each one. let contents = t!(target_deps_dir.read_dir()) @@ -1168,7 +1168,7 @@ pub fn stream_cargo( Err(e) => panic!("failed to execute command: {:?}\nerror: {}", cargo, e), }; - // Spawn Cargo slurping up its JSON output. We'll start building up the + // Spawn Cargo, collecting its JSON output. We'll start building up the // `deps` array of all files it generated along with a `toplevel` array of // files we need to probe for later. let stdout = BufReader::new(child.stdout.take().unwrap()); @@ -1176,7 +1176,7 @@ pub fn stream_cargo( let line = t!(line); match serde_json::from_str::(&line) { Ok(msg) => cb(msg), - // If this was informational, just print it out and continue + // If this was informational, just print it out and continue. Err(_) => println!("{}", line) } } diff --git a/src/bootstrap/dist.rs b/src/bootstrap/dist.rs index bc1fdad356be3..30cfbd61773c0 100644 --- a/src/bootstrap/dist.rs +++ b/src/bootstrap/dist.rs @@ -2222,8 +2222,8 @@ impl Step for Lldb { } } - // The lldb scripts might be installed in lib/python$version - // or in lib64/python$version. If lib64 exists, use it; + // The lldb scripts might be installed in `lib/python$version` + // or in `lib64/python$version`. If lib64 exists, use it; // otherwise lib. 
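// A hedged sketch (hypothetical names, not bootstrap's real helpers) of the
// "most recent file in `deps`" probe that `run_cargo` performs above for each
// prefix/extension pair recorded in `toplevel`:
use std::fs;
use std::path::{Path, PathBuf};
use std::time::SystemTime;

fn newest_match(deps: &Path, prefix: &str, ext: &str) -> Option<PathBuf> {
    let mut best: Option<(SystemTime, PathBuf)> = None;
    for entry in fs::read_dir(deps).ok()? {
        let entry = entry.ok()?;
        let name = entry.file_name().into_string().ok()?;
        // Hashed artifacts look like `{prefix}-{hash}.{ext}`.
        if !name.starts_with(prefix) || !name.ends_with(ext) {
            continue;
        }
        let mtime = entry.metadata().ok()?.modified().ok()?;
        if best.as_ref().map_or(true, |(t, _)| mtime > *t) {
            best = Some((mtime, entry.path()));
        }
    }
    best.map(|(_, path)| path)
}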
let libdir = builder.llvm_out(target).join("lib64"); let (libdir, libdir_name) = if libdir.exists() { diff --git a/src/bootstrap/doc.rs b/src/bootstrap/doc.rs index d14b23e5988cb..dbfefe9c3e5f0 100644 --- a/src/bootstrap/doc.rs +++ b/src/bootstrap/doc.rs @@ -567,14 +567,14 @@ impl Step for Test { compiler }; - // Build libstd docs so that we generate relative links + // Build libstd docs so that we generate relative links. builder.ensure(Std { stage, target }); builder.ensure(compile::Test { compiler, target }); let out_dir = builder.stage_out(compiler, Mode::Test) .join(target).join("doc"); - // See docs in std above for why we symlink + // See docs in std above for why we symlink. let my_out = builder.crate_doc_out(target); t!(symlink_dir_force(&builder.config, &my_out, &out_dir)); @@ -633,14 +633,14 @@ impl Step for WhitelistedRustc { compiler }; - // Build libstd docs so that we generate relative links + // Build libstd docs so that we generate relative links. builder.ensure(Std { stage, target }); builder.ensure(compile::Rustc { compiler, target }); let out_dir = builder.stage_out(compiler, Mode::Rustc) .join(target).join("doc"); - // See docs in std above for why we symlink + // See docs in std above for why we symlink. let my_out = builder.crate_doc_out(target); t!(symlink_dir_force(&builder.config, &my_out, &out_dir)); @@ -737,7 +737,7 @@ impl Step for Rustc { for krate in &compiler_crates { // Create all crate output directories first to make sure rustdoc uses // relative links. - // FIXME: Cargo should probably do this itself. + // FIXME: cargo should probably do this itself. t!(fs::create_dir_all(out_dir.join(krate))); cargo.arg("-p").arg(krate); } @@ -879,7 +879,7 @@ impl Step for ErrorIndex { index.arg("html"); index.arg(out.join("error-index.html")); - // FIXME: shouldn't have to pass this env var + // FIXME: shouldn't have to pass this env var. index.env("CFG_BUILD", &builder.config.build) .env("RUSTC_ERROR_METADATA_DST", builder.extended_error_dir()); @@ -936,7 +936,7 @@ fn symlink_dir_force(config: &Config, src: &Path, dst: &Path) -> io::Result<()> if m.file_type().is_dir() { fs::remove_dir_all(dst)?; } else { - // handle directory junctions on windows by falling back to + // Handle directory junctions on Windows by falling back to // `remove_dir`. fs::remove_file(dst).or_else(|_| { fs::remove_dir(dst) diff --git a/src/bootstrap/flags.rs b/src/bootstrap/flags.rs index 0f9a4271ac062..1cb2f44b4978d 100644 --- a/src/bootstrap/flags.rs +++ b/src/bootstrap/flags.rs @@ -33,7 +33,7 @@ pub struct Flags { pub rustc_error_format: Option<String>, pub dry_run: bool, - // true => deny + // `true` => deny pub warnings: Option<bool>, } @@ -139,8 +139,8 @@ To learn more about a subcommand, run `./x.py -h`" // We can't use getopt to parse the options until we have completed specifying which // options are valid, but under the current implementation, some options are conditional on - // the subcommand. Therefore we must manually identify the subcommand first, so that we can - // complete the definition of the options. Then we can use the getopt::Matches object from + // the subcommand. Therefore, we must manually identify the subcommand first, so that we can + // complete the definition of the options. Then we can use the `getopt::Matches` object from // there on out.
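// A hedged sketch of the two-pass parse described above (hypothetical,
// trimmed-down option set; the real list lives in `flags.rs`): find the
// subcommand token first, then finish defining the options it enables.
fn parse_args(args: &[String]) -> getopts::Matches {
    let known = ["build", "check", "test", "bench", "doc", "clean", "dist", "install"];
    let subcommand = args.iter().find(|s| known.contains(&s.as_str()));
    let mut opts = getopts::Options::new();
    opts.optflag("h", "help", "print this help message");
    if subcommand.map(|s| s == "test").unwrap_or(false) {
        // Conditional on the subcommand, exactly as the comment explains.
        opts.optflag("", "no-fail-fast", "run all tests regardless of failure");
    }
    opts.parse(args).expect("failed to parse options")
}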
let subcommand = args.iter().find(|&s| { (s == "build") diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs index f48f9ee752e93..53fccc066d48a 100644 --- a/src/bootstrap/native.rs +++ b/src/bootstrap/native.rs @@ -197,8 +197,8 @@ impl Step for Llvm { cfg.define("LLDB_CODESIGN_IDENTITY", ""); cfg.define("LLDB_NO_DEBUGSERVER", "ON"); } else { - // LLDB requires libxml2; but otherwise we want it to be disabled. - // See https://github.com/rust-lang/rust/pull/50104 + // LLDB requires libxml2, but otherwise we want it to be disabled. + // See PR #50104. cfg.define("LLVM_ENABLE_LIBXML2", "OFF"); } diff --git a/src/bootstrap/sanity.rs b/src/bootstrap/sanity.rs index ff4fb85bbfad3..eae408888625b 100644 --- a/src/bootstrap/sanity.rs +++ b/src/bootstrap/sanity.rs @@ -62,13 +62,13 @@ pub fn check(build: &mut Build) { // On Windows, quotes are invalid characters for filename paths, and if // one is present as part of the PATH then that can lead to the system // being unable to identify the files properly. See - // https://github.com/rust-lang/rust/issues/34959 for more details. + // issue #34959 for more details. if cfg!(windows) && path.to_string_lossy().contains('\"') { panic!("PATH contains invalid character '\"'"); } let mut cmd_finder = Finder::new(); - // If we've got a git directory we're gonna need git to update + // If we've got a Git directory we're gonna need git to update // submodules and learn about various other aspects. if build.rust_info.is_git() { cmd_finder.must_have("git"); @@ -122,7 +122,7 @@ pub fn check(build: &mut Build) { // We're gonna build some custom C code here and there, host triples // also build some C++ shims for LLVM so we need a C++ compiler. for target in &build.targets { - // On emscripten we don't actually need the C compiler to just + // On Emscripten we don't actually need the C compiler to just // build the target artifacts, only for testing. For the sake // of easier bot configuration, just skip detection. if target.contains("emscripten") { diff --git a/src/bootstrap/test.rs b/src/bootstrap/test.rs index a882550f734f4..f2e312ae828d2 100644 --- a/src/bootstrap/test.rs +++ b/src/bootstrap/test.rs @@ -47,7 +47,7 @@ impl From<Kind> for TestKind { } impl TestKind { - // Return the cargo subcommand for this test kind + // Returns the cargo subcommand for this test kind. fn subcommand(self) -> &'static str { match self { TestKind::Test => "test", @@ -517,8 +517,8 @@ impl Step for Clippy { } fn path_for_cargo(builder: &Builder, compiler: Compiler) -> OsString { - // Configure PATH to find the right rustc. NB. we have to use PATH - // and not RUSTC because the Cargo test suite has tests that will + // Configure `PATH` to find the right rustc. N.B., we have to use `PATH` + // and not `RUSTC` because the Cargo test suite has tests that will // fail if rustc is not spelled `rustc`. let path = builder.sysroot(compiler).join("bin"); let old_path = env::var_os("PATH").unwrap_or_default(); @@ -951,7 +951,7 @@ impl Step for Compiletest { } if suite.ends_with("fulldeps") || - // FIXME: Does pretty need librustc compiled? Note that there are + // FIXME: does pretty need librustc compiled? Note that there are // fulldeps test suites with mode = pretty as well. mode == "pretty" { @@ -971,7 +971,7 @@ impl Step for Compiletest { builder.ensure(compile::Std { compiler, target: compiler.host }); } - // HACK(eddyb) ensure that `libproc_macro` is available on the host. + // HACK(eddyb): ensure that `libproc_macro` is available on the host.
builder.ensure(compile::Test { compiler, target: compiler.host }); // Also provide `rust_test_helpers` for the host. builder.ensure(native::TestHelpers { target: compiler.host }); @@ -1976,8 +1976,8 @@ impl Step for Bootstrap { .env("RUSTC", &builder.initial_rustc); if let Some(flags) = option_env!("RUSTFLAGS") { // Use the same rustc flags for testing as for "normal" compilation, - // so that Cargo doesn’t recompile the entire dependency graph every time: - // https://github.com/rust-lang/rust/issues/49215 + // so that Cargo doesn’t recompile the entire dependency graph every time + // (issue #49215). cmd.env("RUSTFLAGS", flags); } if !builder.fail_fast { diff --git a/src/liballoc/alloc.rs b/src/liballoc/alloc.rs index ec652df3b37a4..e3c783f23b8c6 100644 --- a/src/liballoc/alloc.rs +++ b/src/liballoc/alloc.rs @@ -1,4 +1,4 @@ -//! Memory allocation APIs +//! Memory allocation APIs. #![stable(feature = "alloc_module", since = "1.28.0")] @@ -11,7 +11,7 @@ use core::usize; pub use core::alloc::*; extern "Rust" { - // These are the magic symbols to call the global allocator. rustc generates + // These are the magic symbols to call the global allocator. rustc generates // them from the `#[global_allocator]` attribute if there is one, or uses the // default implementations in libstd (`__rdl_alloc` etc in `src/libstd/alloc.rs`) // otherwise. @@ -38,7 +38,7 @@ extern "Rust" { #[derive(Copy, Clone, Default, Debug)] pub struct Global; -/// Allocate memory with the global allocator. +/// Allocates memory with the global allocator. /// /// This function forwards calls to the [`GlobalAlloc::alloc`] method /// of the allocator registered with the `#[global_allocator]` attribute @@ -72,7 +72,7 @@ pub unsafe fn alloc(layout: Layout) -> *mut u8 { __rust_alloc(layout.size(), layout.align()) } -/// Deallocate memory with the global allocator. +/// Deallocates memory with the global allocator. /// /// This function forwards calls to the [`GlobalAlloc::dealloc`] method /// of the allocator registered with the `#[global_allocator]` attribute @@ -90,7 +90,7 @@ pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) { __rust_dealloc(ptr, layout.size(), layout.align()) } -/// Reallocate memory with the global allocator. +/// Reallocates memory with the global allocator. /// /// This function forwards calls to the [`GlobalAlloc::realloc`] method /// of the allocator registered with the `#[global_allocator]` attribute @@ -108,7 +108,7 @@ pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 __rust_realloc(ptr, layout.size(), layout.align(), new_size) } -/// Allocate zero-initialized memory with the global allocator. +/// Allocates zero-initialized memory with the global allocator. /// /// This function forwards calls to the [`GlobalAlloc::alloc_zeroed`] method /// of the allocator registered with the `#[global_allocator]` attribute @@ -170,6 +170,7 @@ unsafe impl Alloc for Global { } /// The allocator for unique pointers. + // // This function must not unwind. If it does, MIR codegen will fail. #[cfg(not(test))] #[lang = "exchange_malloc"] @@ -194,14 +195,15 @@ pub(crate) unsafe fn box_free<T: ?Sized>(ptr: Unique<T>) { let ptr = ptr.as_ptr(); let size = size_of_val(&*ptr); let align = min_align_of_val(&*ptr); - // We do not allocate for Box when T is ZST, so deallocation is also not necessary. + // We do not allocate for `Box` when `T` is zero-sized, so deallocation is also not + // necessary.
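// An aside on the guard that follows: for zero-sized `T` there was never an
// allocation, so there is nothing to deallocate. A safe, runnable sketch of
// the same size check (illustrative only, not part of this patch):
fn zst_demo() {
    use std::mem::size_of_val;
    let unit = ();
    let word = 42u64;
    assert_eq!(size_of_val(&unit), 0); // ZST: `box_free` would skip `dealloc`
    assert_eq!(size_of_val(&word), 8); // a real allocation must be freed
}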
if size != 0 { let layout = Layout::from_size_align_unchecked(size, align); dealloc(ptr as *mut u8, layout); } } -/// Abort on memory allocation error or failure. +/// Aborts on memory allocation error or failure. /// /// Callers of memory allocation APIs wishing to abort computation /// in response to an allocation error are encouraged to call this function, diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs index 51549f92d4dbf..58ea870fa4626 100644 --- a/src/liballoc/boxed.rs +++ b/src/liballoc/boxed.rs @@ -275,7 +275,7 @@ impl<T: ?Sized> Box<T> { #[stable(feature = "rust1", since = "1.0.0")] unsafe impl<#[may_dangle] T: ?Sized> Drop for Box<T> { fn drop(&mut self) { - // FIXME: Do nothing, drop is currently performed by compiler. + // FIXME: do nothing; drop is currently performed by compiler. } } diff --git a/src/liballoc/collections/binary_heap.rs b/src/liballoc/collections/binary_heap.rs index 3b94379b58f8f..1ec9229548e3d 100644 --- a/src/liballoc/collections/binary_heap.rs +++ b/src/liballoc/collections/binary_heap.rs @@ -945,7 +945,7 @@ impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> { } } -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +// FIXME(#26925): Remove in favor of `#[derive(Clone)]`. #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Iter<'a, T> { fn clone(&self) -> Iter<'a, T> { diff --git a/src/liballoc/collections/btree/map.rs b/src/liballoc/collections/btree/map.rs index 5ec5064b73515..795c77d5a7363 100644 --- a/src/liballoc/collections/btree/map.rs +++ b/src/liballoc/collections/btree/map.rs @@ -887,7 +887,7 @@ impl<K: Ord, V> BTreeMap<K, V> { /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn entry(&mut self, key: K) -> Entry<'_, K, V> { - // FIXME(@porglezomp) Avoid allocating if we don't insert + // FIXME(porglezomp): avoid allocating if we don't insert. self.ensure_root_is_owned(); match search::search_tree(self.root.as_mut(), &key) { Found(handle) => { diff --git a/src/liballoc/collections/btree/node.rs b/src/liballoc/collections/btree/node.rs index eb0667228d1ff..4fd564b582652 100644 --- a/src/liballoc/collections/btree/node.rs +++ b/src/liballoc/collections/btree/node.rs @@ -1,4 +1,4 @@ -// This is an attempt at an implementation following the ideal +// This is an attempt at an implementation of the following ideal: // // ``` // struct BTreeMap<K, V> { @@ -250,7 +250,7 @@ impl<K, V> Root<K, V> { NodeRef { height: self.height, node: self.node.as_ptr(), - root: ptr::null_mut(), // FIXME: Is there anything better to do here? + root: ptr::null_mut(), // FIXME: is there anything better to do here? _marker: PhantomData, } } @@ -305,7 +305,7 @@ impl<K, V> Root<K, V> { } } -// N.B. `NodeRef` is always covariant in `K` and `V`, even when the `BorrowType` +// N.B., `NodeRef` is always covariant in `K` and `V`, even when the `BorrowType` // is `Mut`. This is technically wrong, but cannot result in any unsafety due to // internal use of `NodeRef` because we stay completely generic over `K` and `V`. // However, whenever a public type wraps `NodeRef`, make sure that it has the @@ -322,8 +322,8 @@ impl<K, V> Root<K, V> { /// `Leaf`, the `NodeRef` points to a leaf node, when this is `Internal` the /// `NodeRef` points to an internal node, and when this is `LeafOrInternal` the /// `NodeRef` could be pointing to either type of node. -/// Note that in case of a leaf node, this might still be the shared root!
Only turn +/// this into a `LeafNode` reference if you know it is not a root! Shared references /// must be dereferenceable *for the entire size of their pointee*, so `&InternalNode` /// pointing to the shared root is UB. /// Turning this into a `NodeHeader` is always safe. @@ -549,7 +549,7 @@ impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> { /// `into_root_mut`) mess with the root of the tree, the result of `reborrow_mut` /// can easily be used to make the original mutable pointer dangling, or, in the case /// of a reborrowed handle, out of bounds. - // FIXME(@gereeter) consider adding yet another type parameter to `NodeRef` that restricts + // FIXME(gereeter): consider adding yet another type parameter to `NodeRef` that restricts // the use of `ascend` and `into_root_mut` on reborrowed pointers, preventing this unsafety. unsafe fn reborrow_mut(&mut self) -> NodeRef<marker::Mut<'_>, K, V, Type> { NodeRef { @@ -562,7 +562,7 @@ impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> { /// Returns a raw ptr to avoid asserting exclusive access to the entire node. fn as_leaf_mut(&mut self) -> *mut LeafNode<K, V> { - // We are mutable, so we cannot be the root, so accessing this as a leaf is okay. + // We are mutable, so we cannot be the root, so accessing this as a leaf is ok. self.node.as_ptr() } @@ -578,14 +578,14 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> { impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> { fn into_key_slice(self) -> &'a [K] { // We have to be careful here because we might be pointing to the shared root. - // In that case, we must not create an `&LeafNode`. We could just return + // In that case, we must not create an `&LeafNode`. We could just return // an empty slice whenever the length is 0 (this includes the shared root), // but we want to avoid that run-time check. // Instead, we create a slice pointing into the node whenever possible. // We can sometimes do this even for the shared root, as the slice will be - // empty. We cannot *always* do this because if the type is too highly + // empty. We cannot *always* do this because if the type is too highly // aligned, the offset of `keys` in a "full node" might be outside the bounds - // of the header! So we do an alignment check first, that will be + // of the header! So we do an alignment check first, that will be // evaluated at compile-time, and only do any run-time check in the rare case // that the alignment is very big. if mem::align_of::<K>() > mem::align_of::<LeafNode<(), ()>>() && self.is_shared_root() { @@ -594,18 +594,18 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> { // Thanks to the alignment check above, we know that `keys` will be // in-bounds of some allocation even if this is the shared root! // (We might be one-past-the-end, but that is allowed by LLVM.) - // Getting the pointer is tricky though. `NodeHeader` does not have a `keys` + // Getting the pointer is tricky though. `NodeHeader` does not have a `keys` // field because we want its size to not depend on the alignment of `K` - // (needed becuase `as_header` should be safe). We cannot call `as_leaf` + // (needed because `as_header` should be safe). We cannot call `as_leaf` // because we might be the shared root. // For this reason, `NodeHeader` has this `K2` parameter (that's usually `()` // and hence just adds a size-0-align-1 field, not affecting layout). // We know that we can transmute `NodeHeader<K, V>` to `NodeHeader<K, V, K>` // because we did the alignment check above, and hence `NodeHeader<K, V, K>` - // is not bigger than `NodeHeader<K, V>`!
Then we can use `NodeHeader<K, V, K>` // to compute the pointer where the keys start. // This entire hack will become unnecessary once - // <https://github.com/rust-lang/rfcs/pull/2582> lands, then we can just take a raw + // RFC #2582 lands, then we can just take a raw // pointer to the `keys` field of `*const InternalNode`. // This is a non-debug-assert because it can be completely compile-time evaluated. @@ -620,7 +620,7 @@ impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> { fn into_val_slice(self) -> &'a [V] { debug_assert!(!self.is_shared_root()); - // We cannot be the root, so `as_leaf` is okay + // We cannot be the root, so `as_leaf` is ok. unsafe { slice::from_raw_parts( MaybeUninit::first_ptr(&self.as_leaf().vals), @@ -950,7 +950,7 @@ impl<'a, K, V, NodeType, HandleType> /// `into_root_mut`) mess with the root of the tree, the result of `reborrow_mut` /// can easily be used to make the original mutable pointer dangling, or, in the case /// of a reborrowed handle, out of bounds. - // FIXME(@gereeter) consider adding yet another type parameter to `NodeRef` that restricts + // FIXME(gereeter): consider adding yet another type parameter to `NodeRef` that restricts // the use of `ascend` and `into_root_mut` on reborrowed pointers, preventing this unsafety. pub unsafe fn reborrow_mut(&mut self) -> Handle<NodeRef<marker::Mut<'_>, K, V, NodeType>, HandleType> { diff --git a/src/liballoc/collections/linked_list.rs b/src/liballoc/collections/linked_list.rs index afd8078cdd753..54c5cbe58ca50 100644 --- a/src/liballoc/collections/linked_list.rs +++ b/src/liballoc/collections/linked_list.rs @@ -69,7 +69,7 @@ impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> { } } -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +// FIXME(#26925): Remove in favor of `#[derive(Clone)]`. #[stable(feature = "rust1", since = "1.0.0")] impl<T> Clone for Iter<'_, T> { fn clone(&self) -> Self { @@ -1383,7 +1383,7 @@ mod tests { // This caused the RHS's dtor to walk up into the LHS at drop and delete all of // its nodes. // - // https://github.com/rust-lang/rust/issues/26021 + // Issue #26021. let mut v1 = LinkedList::new(); v1.push_front(1); v1.push_front(1); diff --git a/src/liballoc/collections/vec_deque.rs b/src/liballoc/collections/vec_deque.rs index b6fdaa8999212..0122511c08593 100644 --- a/src/liballoc/collections/vec_deque.rs +++ b/src/liballoc/collections/vec_deque.rs @@ -151,7 +151,7 @@ impl<T> VecDeque<T> { wrap_index(idx.wrapping_sub(subtrahend), self.cap()) } - /// Copies a contiguous block of memory len long from src to dst + /// Copies a contiguous block of memory `len` long from `src` to `dst`. #[inline] unsafe fn copy(&self, dst: usize, src: usize, len: usize) { debug_assert!(dst + len <= self.cap(), @@ -171,7 +171,7 @@ impl<T> VecDeque<T> { len); } - /// Copies a contiguous block of memory len long from src to dst + /// Copies a contiguous block of memory `len` long from `src` to `dst`. #[inline] unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) { debug_assert!(dst + len <= self.cap(), @@ -191,9 +191,9 @@ impl<T> VecDeque<T> { len); } - /// Copies a potentially wrapping block of memory len long from src to dest. - /// (abs(dst - src) + len) must be no larger than cap() (There must be at - /// most one continuous overlapping region between src and dest). + /// Copies a potentially wrapping block of memory `len` long from `src` to `dst`. + /// `abs(dst - src) + len` must be no larger than `cap()`. (There must be at + /// most one continuous overlapping region between `src` and `dst`).
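// A sketch of the wrapping index arithmetic these helpers build on: with the
// power-of-two capacity `VecDeque` maintains, wrapping reduces to a mask
// (free-standing toy version, not the buffer code itself):
fn wrap_index(index: usize, cap: usize) -> usize {
    debug_assert!(cap.is_power_of_two());
    index & (cap - 1)
}

fn wrap_demo() {
    let cap = 8;
    assert_eq!(wrap_index(7 + 3, cap), 2); // forward past the end
    assert_eq!(wrap_index(1usize.wrapping_sub(3), cap), 6); // and backwards
}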
unsafe fn wrap_copy(&self, dst: usize, src: usize, len: usize) { #[allow(dead_code)] fn diff(a: usize, b: usize) -> usize { @@ -219,7 +219,7 @@ impl<T> VecDeque<T> { match (dst_after_src, src_wraps, dst_wraps) { (_, false, false) => { - // src doesn't wrap, dst doesn't wrap + // `src` doesn't wrap, `dst` doesn't wrap. // // S . . . // 1 [_ _ A A B B C C _] @@ -229,7 +229,7 @@ impl<T> VecDeque<T> { self.copy(dst, src, len); } (false, false, true) => { - // dst before src, src doesn't wrap, dst wraps + // `dst` before `src`, `src` doesn't wrap, `dst` wraps. // // S . . . // 1 [A A B B _ _ _ C C] @@ -241,7 +241,7 @@ impl<T> VecDeque<T> { self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len); } (true, false, true) => { - // src before dst, src doesn't wrap, dst wraps + // `src` before `dst`, `src` doesn't wrap, `dst` wraps. // // S . . . // 1 [C C _ _ _ A A B B] @@ -253,7 +253,7 @@ impl<T> VecDeque<T> { self.copy(dst, src, dst_pre_wrap_len); } (false, true, false) => { - // dst before src, src wraps, dst doesn't wrap + // `dst` before `src`, `src` wraps, `dst` doesn't wrap. // // . . S . // 1 [C C _ _ _ A A B B] @@ -265,7 +265,7 @@ impl<T> VecDeque<T> { self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len); } (true, true, false) => { - // src before dst, src wraps, dst doesn't wrap + // `src` before `dst`, `src` wraps, `dst` doesn't wrap. // // . . S . // 1 [A A B B _ _ _ C C] @@ -277,7 +277,7 @@ impl<T> VecDeque<T> { self.copy(dst, src, src_pre_wrap_len); } (false, true, true) => { - // dst before src, src wraps, dst wraps + // `dst` before `src`, `src` wraps, `dst` wraps. // // . . . S . // 1 [A B C D _ E F G H] @@ -293,7 +293,7 @@ impl<T> VecDeque<T> { self.copy(0, delta, len - dst_pre_wrap_len); } (true, true, true) => { - // src before dst, src wraps, dst wraps + // `src` before `dst`, `src` wraps, `dst` wraps. // // . . S . . // 1 [A B C D _ E F G H] @@ -312,7 +312,7 @@ impl<T> VecDeque<T> { } /// Frobs the head and tail sections around to handle the fact that we - /// just reallocated. Unsafe because it trusts old_cap. + /// just reallocated. Unsafe because it trusts `old_cap`. #[inline] unsafe fn handle_cap_increase(&mut self, old_cap: usize) { let new_cap = self.cap(); @@ -333,7 +333,7 @@ impl<T> VecDeque<T> { if self.tail <= self.head { // A - // Nop + // No-op. } else if self.head < old_cap - self.tail { // B self.copy_nonoverlapping(old_cap, 0, self.head); @@ -1014,7 +1014,7 @@ impl<T> VecDeque<T> { tail: drain_tail, head: drain_head, // Crucially, we only create shared references from `self` here and read from - // it. We do not write to `self` nor reborrow to a mutable reference. + // it. We do not write to `self` nor reborrow to a mutable reference. // Hence the raw pointer we created above, for `deque`, remains valid. ring: unsafe { self.buffer_as_slice() }, }, @@ -2130,7 +2130,7 @@ impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> { } } -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +// FIXME(#26925): Remove in favor of `#[derive(Clone)]`. #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T> Clone for Iter<'a, T> { fn clone(&self) -> Iter<'a, T> { @@ -3125,5 +3125,4 @@ mod tests { assert_eq!(*a, 2); } } - } diff --git a/src/liballoc/fmt.rs b/src/liballoc/fmt.rs index d2ba9b001916c..96de7b98f6c36 100644 --- a/src/liballoc/fmt.rs +++ b/src/liballoc/fmt.rs @@ -205,7 +205,7 @@ //! These two formatting traits have distinct purposes: //! //! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully -//! represented as a UTF-8 string at all times. It is **not** expected that +//!
represented as a UTF-8 string at all times. It is *not* expected that //! all types implement the [`Display`] trait. //! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types. //! Output will typically represent the internal state as faithfully as possible. diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs index 95b9dacf8565a..8bb73b12c345b 100644 --- a/src/liballoc/lib.rs +++ b/src/liballoc/lib.rs @@ -135,7 +135,7 @@ pub mod alloc; // Primitive types using the heaps above // Need to conditionally define the mod from `boxed.rs` to avoid -// duplicating the lang-items when building in test cfg; but also need +// duplicating the lang-items when building in test cfg, but also need // to allow code to have `use boxed::Box;` declarations. #[cfg(not(test))] pub mod boxed; diff --git a/src/liballoc/macros.rs b/src/liballoc/macros.rs index eb3410078513d..e32f7e717e324 100644 --- a/src/liballoc/macros.rs +++ b/src/liballoc/macros.rs @@ -46,7 +46,7 @@ macro_rules! vec { ($($x:expr,)*) => (vec![$($x),*]) } -// HACK(japaric): with cfg(test) the inherent `[T]::into_vec` method, which is +// HACK(japaric): with `cfg(test)`, the inherent `[T]::into_vec` method, which is // required for this macro definition, is not available. Instead use the // `slice::into_vec` function which is only available with cfg(test) // NB see the slice::hack module in slice.rs for more information diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs index fe28fe5095cce..6882816899793 100644 --- a/src/liballoc/raw_vec.rs +++ b/src/liballoc/raw_vec.rs @@ -317,7 +317,7 @@ impl<T, A: Alloc> RawVec<T, A> { } } None => { - // skip to 4 because tiny Vec's are dumb; but not if that + // skip to 4 because tiny Vec's are dumb, but not if that // would cause overflow let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 }; match self.a.alloc_array::<T>(new_cap) { @@ -350,14 +350,15 @@ impl<T, A: Alloc> RawVec<T, A> { let elem_size = mem::size_of::<T>(); let old_layout = match self.current_layout() { Some(layout) => layout, - None => return false, // nothing to double + // Nothing to double. + None => return false, }; - // since we set the capacity to usize::MAX when elem_size is - // 0, getting to here necessarily means the RawVec is overfull. + // Since we set the capacity to `usize::MAX` when `elem_size` is + // 0, getting to here necessarily means the `RawVec` is overfull. assert!(elem_size != 0, "capacity overflow"); - // Since we guarantee that we never allocate more than isize::MAX + // Since we guarantee that we never allocate more than `isize::MAX` // bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so // this can't overflow. // @@ -533,11 +534,11 @@ impl<T, A: Alloc> RawVec<T, A> { .unwrap_or_else(|_| capacity_overflow()); // Here, `cap < used_cap + needed_extra_cap <= new_cap` - // (regardless of whether `self.cap - used_cap` wrapped). - // Therefore we can safely call grow_in_place. + // (regardless of whether `self.cap - used_cap` wrapped), therefore we can safely + // call `grow_in_place`. let new_layout = Layout::new::<T>().repeat(new_cap).unwrap().0; - // FIXME: may crash and burn on over-reserve + // FIXME: may crash and burn on over-reserving.
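// A toy model (not `RawVec` itself) of the amortized growth rule the comments
// above describe: double the capacity, never return less than the request,
// and skip straight to 4 for tiny vectors:
fn amortized_new_cap(cur: usize, required: usize) -> usize {
    let doubled = cur.saturating_mul(2);
    doubled.max(required).max(4)
}

fn growth_demo() {
    assert_eq!(amortized_new_cap(0, 1), 4);   // "tiny Vec's are dumb": start at 4
    assert_eq!(amortized_new_cap(4, 5), 8);   // otherwise double
    assert_eq!(amortized_new_cap(8, 20), 20); // unless more is needed at once
}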
alloc_guard(new_layout.size()).unwrap_or_else(|_| capacity_overflow()); match self.a.grow_in_place( NonNull::from(self.ptr).cast(), old_layout, new_layout.size(), @@ -566,21 +567,21 @@ impl<T, A: Alloc> RawVec<T, A> { pub fn shrink_to_fit(&mut self, amount: usize) { let elem_size = mem::size_of::<T>(); - // Set the `cap` because they might be about to promote to a `Box<[T]>` + // Set the `cap` because they might be about to promote to a `Box<[T]>`. if elem_size == 0 { self.cap = amount; return; } - // This check is my waterloo; it's the only thing Vec wouldn't have to do. + // This check is my Waterloo; it's the only thing `Vec` wouldn't have to do. assert!(self.cap >= amount, "Tried to shrink to a larger capacity"); if amount == 0 { // We want to create a new zero-length vector within the - // same allocator. We use ptr::write to avoid an + // same allocator. We use `ptr::write` to avoid an // erroneous attempt to drop the contents, and we use - // ptr::read to sidestep condition against destructuring - // types that implement Drop. + // `ptr::read` to sidestep the condition against destructuring + // types that implement `Drop`. unsafe { let a = ptr::read(&self.a as *const A); @@ -651,7 +652,7 @@ impl<T, A: Alloc> RawVec<T, A> { return Ok(()); } - // Nothing we can really do about these checks :( + // Nothing we can really do about these checks. let new_cap = match strategy { Exact => used_cap.checked_add(needed_extra_cap).ok_or(CapacityOverflow)?, Amortized => self.amortized_new_size(used_cap, needed_extra_cap)?, @@ -786,7 +787,8 @@ mod tests { let a = BoundedAlloc { fuel: 500 }; let mut v: RawVec<u8, BoundedAlloc> = RawVec::with_capacity_in(50, a); assert_eq!(v.a.fuel, 450); - v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel) + // Causes a realloc, thus using 50 + 150 = 200 units of fuel. + v.reserve(50, 150); assert_eq!(v.a.fuel, 250); } @@ -794,7 +796,7 @@ mod tests { fn reserve_does_not_overallocate() { { let mut v: RawVec<u32> = RawVec::new(); - // First `reserve` allocates like `reserve_exact` + // First `reserve` allocates like `reserve_exact`. v.reserve(0, 9); assert_eq!(9, v.cap()); } @@ -817,7 +819,7 @@ mod tests { // 3 is less than half of 12, so `reserve` must grow // exponentially. At the time of writing this test grow // factor is 2, so new capacity is 24, however, grow factor - // of 1.5 is OK too. Hence `>= 18` in assert. + // of 1.5 is ok too. Hence `>= 18` in assert. assert!(v.cap() >= 12 + 12 / 2); } } diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index 12f75d84211e6..a53a7d49c5816 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -1220,7 +1220,7 @@ pub struct Weak<T: ?Sized> { // This is a `NonNull` to allow optimizing the size of this type in enums, // but it is not necessarily a valid pointer. // `Weak::new` sets this to `usize::MAX` so that it doesn’t need - // to allocate space on the heap. That's not a value a real pointer + // to allocate space on the heap. That's not a value a real pointer // will ever have because RcBox has alignment at least 2. ptr: NonNull<RcBox<T>>, } @@ -1482,9 +1482,9 @@ impl<T> Default for Weak<T> { } } -// NOTE: We checked_add here to deal with mem::forget safely. In particular -// if you mem::forget Rcs (or Weaks), the ref-count can overflow, and then -// you can free the allocation while outstanding Rcs (or Weaks) exist. +// NOTE: we `checked_add` here to deal with `mem::forget` safely. In particular +// if you `mem::forget` `Rc`s (or `Weak`s), the ref-count can overflow, and then +// you can free the allocation while outstanding `Rc`s (or `Weak`s) exist.
// We abort because this is such a degenerate scenario that we don't care about // what happens -- no real program should ever experience this. // diff --git a/src/liballoc/slice.rs b/src/liballoc/slice.rs index c4f4a80a017df..1c32a155667f1 100644 --- a/src/liballoc/slice.rs +++ b/src/liballoc/slice.rs @@ -122,12 +122,12 @@ pub use core::slice::{RChunks, RChunksMut, RChunksExact, RChunksExactMut}; // Basic slice extension methods //////////////////////////////////////////////////////////////////////////////// -// HACK(japaric) needed for the implementation of `vec!` macro during testing +// HACK(japaric): needed for the implementation of `vec!` macro during testing // NB see the hack module in this file for more details #[cfg(test)] pub use hack::into_vec; -// HACK(japaric) needed for the implementation of `Vec::clone` during testing +// HACK(japaric): needed for the implementation of `Vec::clone` during testing // NB see the hack module in this file for more details #[cfg(test)] pub use hack::to_vec; @@ -831,7 +831,7 @@ unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F) impl<T> Drop for MergeHole<T> { fn drop(&mut self) { - // `T` is not a zero-sized type, so it's okay to divide by its size. + // `T` is not a zero-sized type, so it's ok to divide by its size. let len = (self.end as usize - self.start as usize) / mem::size_of::<T>(); unsafe { ptr::copy_nonoverlapping(self.start, self.dest, len); } } diff --git a/src/liballoc/str.rs b/src/liballoc/str.rs index a36804bddff32..011de06560677 100644 --- a/src/liballoc/str.rs +++ b/src/liballoc/str.rs @@ -356,7 +356,7 @@ impl str { // This is the only conditional (contextual) but language-independent mapping // in `SpecialCasing.txt`, // so hard-code it rather than have a generic "condition" mechanism. - // See https://github.com/rust-lang/rust/issues/26035 + // See issue #26035. map_uppercase_sigma(self, i, &mut s) } else { match conversions::to_lower(c) { @@ -376,7 +376,7 @@ impl str { return s; fn map_uppercase_sigma(from: &str, i: usize, to: &mut String) { - // See http://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G33992 + // See <http://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G33992>. // for the definition of `Final_Sigma`. debug_assert!('Σ'.len_utf8() == 2); let is_word_final = case_ignoreable_then_cased(from[..i].chars().rev()) && diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs index b7d7995b540ba..fc18642d73248 100644 --- a/src/liballoc/sync.rs +++ b/src/liballoc/sync.rs @@ -234,7 +234,7 @@ pub struct Weak<T: ?Sized> { // This is a `NonNull` to allow optimizing the size of this type in enums, // but it is not necessarily a valid pointer. // `Weak::new` sets this to `usize::MAX` so that it doesn’t need - // to allocate space on the heap. That's not a value a real pointer + // to allocate space on the heap. That's not a value a real pointer // will ever have because RcBox has alignment at least 2. ptr: NonNull<ArcInner<T>>, } @@ -283,8 +283,8 @@ impl<T> Arc<T> { #[inline] #[stable(feature = "rust1", since = "1.0.0")] pub fn new(data: T) -> Arc<T> { - // Start the weak pointer count as 1 which is the weak pointer that's - // held by all the strong pointers (kinda), see std/rc.rs for more info + // Start the weak pointer count at `1`, which is the weak pointer that's + // held by all the strong pointers (kinda), see `std/rc.rs` for more info.
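// The "weak pointer held by all the strong pointers" above is observable
// through the std API; a small, safe demonstration:
fn weak_count_demo() {
    use std::sync::Arc;
    let a = Arc::new(5);
    assert_eq!(Arc::strong_count(&a), 1);
    // The implicit weak is deliberately excluded from `weak_count`.
    assert_eq!(Arc::weak_count(&a), 0);
    let w = Arc::downgrade(&a);
    assert_eq!(Arc::weak_count(&a), 1);
    drop(w);
}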
let x: Box<_> = box ArcInner { strong: atomic::AtomicUsize::new(1), weak: atomic::AtomicUsize::new(1), @@ -449,27 +449,26 @@ impl<T: ?Sized> Arc<T> { /// ``` #[stable(feature = "arc_weak", since = "1.4.0")] pub fn downgrade(this: &Self) -> Weak<T> { - // This Relaxed is OK because we're checking the value in the CAS - // below. + // This `Relaxed` is ok because we're checking the value in the CAS below. let mut cur = this.inner().weak.load(Relaxed); loop { - // check if the weak counter is currently "locked"; if so, spin. + // Check if the weak counter is currently "locked"; if so, spin. if cur == usize::MAX { cur = this.inner().weak.load(Relaxed); continue; } // NOTE: this code currently ignores the possibility of overflow - // into usize::MAX; in general both Rc and Arc need to be adjusted + // into `usize::MAX`; in general both `Rc` and `Arc` need to be adjusted // to deal with overflow. - // Unlike with Clone(), we need this to be an Acquire read to + // Unlike with `Clone()`, we need this to be an `Acquire` read to // synchronize with the write coming from `is_unique`, so that the // events prior to that write happen before this read. match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) { Ok(_) => { - // Make sure we do not create a dangling Weak + // Make sure we do not create a dangling `Weak`. debug_assert!(!is_dangling(this.ptr)); return Weak { ptr: this.ptr }; } @@ -749,7 +748,7 @@ impl<T: ?Sized> Clone for Arc<T> { // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) let old_size = self.inner().strong.fetch_add(1, Relaxed); - // However we need to guard against massive refcounts in case someone + // However, we need to guard against massive ref counts in case someone // is `mem::forget`ing Arcs. If we don't do this the count can overflow // and users will use-after free. We racily saturate to `isize::MAX` on // the assumption that there aren't ~2 billion threads incrementing @@ -920,7 +919,7 @@ impl<T: ?Sized> Arc<T> { // // The acquire label here ensures a happens-before relationship with any // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements - // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded + // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded // weak ref was never dropped, the CAS here will fail so we do not care to synchronize. if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() { // This needs to be an `Acquire` to synchronize with the decrement of the `strong` @@ -978,7 +977,7 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> { } // This fence is needed to prevent reordering of use of the data and - // deletion of the data. Because it is marked `Release`, the decreasing + // deletion of the data. Because it is marked `Release`, the decreasing // of the reference count synchronizes with this `Acquire` fence. This // means that use of the data happens before decreasing the reference // count, which happens before this fence, which happens before the @@ -1270,13 +1269,13 @@ impl<T: ?Sized> Clone for Weak<T> { } else { return Weak { ptr: self.ptr }; }; - // See comments in Arc::clone() for why this is relaxed. This can use a - // fetch_add (ignoring the lock) because the weak count is only locked + // See comments in `Arc::clone()` for why this is relaxed. This can use a + // `fetch_add` (ignoring the lock) because the weak count is only locked // where are *no other* weak pointers in existence. (So we can't be // running this code in that case).
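// A reduced sketch of the overflow guard discussed in `Arc::clone()` above
// (illustrative constant and names; the real code lives in this file):
fn guarded_increment(strong: &std::sync::atomic::AtomicUsize) {
    use std::sync::atomic::Ordering::Relaxed;
    const MAX_REFCOUNT: usize = isize::MAX as usize;
    let old = strong.fetch_add(1, Relaxed);
    if old > MAX_REFCOUNT {
        // ~2 billion leaked clones (`mem::forget`); abort rather than risk a
        // use-after-free when the count overflows.
        std::process::abort();
    }
}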
let old_size = inner.weak.fetch_add(1, Relaxed); - // See comments in Arc::clone() for why we do this (for mem::forget). + // See comments in `Arc::clone()` for why we do this (for `mem::forget`). if old_size > MAX_REFCOUNT { unsafe { abort(); diff --git a/src/liballoc/tests/lib.rs b/src/liballoc/tests/lib.rs index 2b63ac5c7d2f9..358ea2182a52d 100644 --- a/src/liballoc/tests/lib.rs +++ b/src/liballoc/tests/lib.rs @@ -35,8 +35,8 @@ fn hash<T: Hash>(t: &T) -> u64 { s.finish() } -// FIXME: Instantiated functions with i128 in the signature is not supported in Emscripten. -// See https://github.com/kripken/emscripten-fastcomp/issues/169 +// FIXME: instantiated functions with `i128` in the signature are not supported in Emscripten. +// See <https://github.com/kripken/emscripten-fastcomp/issues/169>. #[cfg(not(target_os = "emscripten"))] #[test] fn test_boxed_hasher() { diff --git a/src/liballoc/tests/linked_list.rs b/src/liballoc/tests/linked_list.rs index 0fbfbdccd4537..ca05b1b236c8f 100644 --- a/src/liballoc/tests/linked_list.rs +++ b/src/liballoc/tests/linked_list.rs @@ -93,7 +93,7 @@ fn test_split_off() { } } - // no-op on the last index + // No-op on the last index. { let mut m = LinkedList::new(); m.push_back(1); diff --git a/src/liballoc/tests/slice.rs b/src/liballoc/tests/slice.rs index 2a9fdfa9324d5..62e1b15908fff 100644 --- a/src/liballoc/tests/slice.rs +++ b/src/liballoc/tests/slice.rs @@ -509,7 +509,7 @@ fn test_rotate_left() { let expected: Vec<_> = (0..13).collect(); let mut v = Vec::new(); - // no-ops + // No-ops. v.clone_from(&expected); v.rotate_left(0); assert_eq!(v, expected); @@ -518,14 +518,14 @@ fn test_rotate_left() { let mut zst_array = [(), (), ()]; zst_array.rotate_left(2); - // happy path + // Happy path. v = (5..13).chain(0..5).collect(); v.rotate_left(8); assert_eq!(v, expected); let expected: Vec<_> = (0..1000).collect(); - // small rotations in large slice, uses ptr::copy + // Small rotations in large slice; uses `ptr::copy`. v = (2..1000).chain(0..2).collect(); v.rotate_left(998); assert_eq!(v, expected); v.rotate_left(2); assert_eq!(v, expected); - // non-small prime rotation, has a few rounds of swapping + // Non-small prime rotation; has a few rounds of swapping. v = (389..1000).chain(0..389).collect(); v.rotate_left(1000-389); assert_eq!(v, expected); } @@ -544,7 +544,7 @@ fn test_rotate_right() { let expected: Vec<_> = (0..13).collect(); let mut v = Vec::new(); - // no-ops + // No-ops. v.clone_from(&expected); v.rotate_right(0); assert_eq!(v, expected); @@ -553,14 +553,14 @@ fn test_rotate_right() { let mut zst_array = [(), (), ()]; zst_array.rotate_right(2); - // happy path + // Happy path. v = (5..13).chain(0..5).collect(); v.rotate_right(5); assert_eq!(v, expected); let expected: Vec<_> = (0..1000).collect(); - // small rotations in large slice, uses ptr::copy + // Small rotations in large slice; uses `ptr::copy`. v = (2..1000).chain(0..2).collect(); v.rotate_right(2); assert_eq!(v, expected); v.rotate_right(998); assert_eq!(v, expected); - // non-small prime rotation, has a few rounds of swapping + // Non-small prime rotation; has a few rounds of swapping.
v = (389..1000).chain(0..389).collect(); v.rotate_right(389); assert_eq!(v, expected); diff --git a/src/liballoc/tests/str.rs b/src/liballoc/tests/str.rs index a1dc763f6d8ff..f021c7a96513f 100644 --- a/src/liballoc/tests/str.rs +++ b/src/liballoc/tests/str.rs @@ -577,7 +577,7 @@ mod slice_index { in mod rangeinclusive { data: "hello"; // note: using 0 specifically ensures that the result of overflowing is 0..0, - // so that `get` doesn't simply return None for the wrong reason. + // so that `get` doesn't simply return `None` for the wrong reason. bad: data[0..=usize::max_value()]; message: "maximum usize"; } @@ -1551,7 +1551,7 @@ fn to_lowercase() { assert_eq!("".to_lowercase(), ""); assert_eq!("AÉDžaé ".to_lowercase(), "aédžaé "); - // https://github.com/rust-lang/rust/issues/26035 + // See issue #26035. assert_eq!("ΑΣ".to_lowercase(), "ας"); assert_eq!("Α'Σ".to_lowercase(), "α'ς"); assert_eq!("Α''Σ".to_lowercase(), "α''ς"); diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs index 57723e4d21281..fb69d84403102 100644 --- a/src/liballoc/vec.rs +++ b/src/liballoc/vec.rs @@ -371,7 +371,7 @@ impl<T> Vec<T> { /// * `capacity` needs to be the capacity that the pointer was allocated with. /// /// Violating these may cause problems like corrupting the allocator's - /// internal data structures. For example it is **not** safe + /// internal data structures. For example, it is *not* safe /// to build a `Vec<u8>` from a pointer to a C `char` array and a `size_t`. /// /// The ownership of `ptr` is effectively transferred to the @@ -688,11 +688,10 @@ impl<T> Vec<T> { let mut ptr = self.as_mut_ptr().add(self.len); // Set the final length at the end, keeping in mind that // dropping an element might panic. Works around a missed - // optimization, as seen in the following issue: - // https://github.com/rust-lang/rust/issues/51802 + // optimization, as seen in issue #51802. let mut local_len = SetLenOnDrop::new(&mut self.len); - // drop any extra elements + // Drop any extra elements. for _ in len..current_len { local_len.decrement_len(1); ptr = ptr.offset(-1); @@ -2579,7 +2578,7 @@ impl<I: Iterator> Drop for Splice<'_, I> { } // There may be more elements. Use the lower bound as an estimate. - // FIXME: Is the upper bound a better guess? Or something else? + // FIXME: is the upper bound a better guess? Or something else? let (lower_bound, _upper_bound) = self.replace_with.size_hint(); if lower_bound > 0 { self.drain.move_tail(lower_bound); } diff --git a/src/libarena/lib.rs b/src/libarena/lib.rs index 8ae046c0796bc..aecfd387b3f19 100644 --- a/src/libarena/lib.rs +++ b/src/libarena/lib.rs @@ -413,7 +413,7 @@ impl DroplessArena { } #[derive(Default)] -// FIXME(@Zoxc): this type is entirely unused in rustc +// FIXME(Zoxc): this type is entirely unused in rustc. pub struct SyncTypedArena<T> { lock: MTLock<TypedArena<T>>, } @@ -421,7 +421,7 @@ pub struct SyncTypedArena<T> { impl<T> SyncTypedArena<T> { #[inline(always)] pub fn alloc(&self, object: T) -> &mut T { - // Extend the lifetime of the result since it's limited to the lock guard + // Extend the lifetime of the result since it's limited to the lock guard. unsafe { &mut *(self.lock.lock().alloc(object) as *mut T) } } @@ -430,7 +430,7 @@ impl<T> SyncTypedArena<T> { pub fn alloc_slice(&self, slice: &[T]) -> &mut [T] where T: Copy, { - // Extend the lifetime of the result since it's limited to the lock guard + // Extend the lifetime of the result since it's limited to the lock guard.
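// A sketch of why the lifetime extension above is sound (simplified stand-in
// types; `MTLock` and the arena types are rustc-internal): the boxed value
// never moves and lives as long as the arena, so widening the guard-bound
// borrow to `&self` is ok.
use std::sync::Mutex;

struct ToyArena {
    storage: Mutex<Vec<Box<u32>>>,
}

impl ToyArena {
    fn alloc(&self, value: u32) -> &u32 {
        let mut guard = self.storage.lock().unwrap();
        guard.push(Box::new(value));
        let ptr: *const u32 = &**guard.last().unwrap();
        // The heap location is stable even if the Vec reallocates, and
        // nothing is dropped until the arena itself goes away.
        unsafe { &*ptr }
    }
}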
unsafe { &mut *(self.lock.lock().alloc_slice(slice) as *mut [T]) } } @@ -453,13 +453,13 @@ impl SyncDroplessArena { #[inline(always)] pub fn alloc_raw(&self, bytes: usize, align: usize) -> &mut [u8] { - // Extend the lifetime of the result since it's limited to the lock guard + // Extend the lifetime of the result since it's limited to the lock guard. unsafe { &mut *(self.lock.lock().alloc_raw(bytes, align) as *mut [u8]) } } #[inline(always)] pub fn alloc(&self, object: T) -> &mut T { - // Extend the lifetime of the result since it's limited to the lock guard + // Extend the lifetime of the result since it's limited to the lock guard. unsafe { &mut *(self.lock.lock().alloc(object) as *mut T) } } @@ -468,7 +468,7 @@ impl SyncDroplessArena { where T: Copy, { - // Extend the lifetime of the result since it's limited to the lock guard + // Extend the lifetime of the result since it's limited to the lock guard. unsafe { &mut *(self.lock.lock().alloc_slice(slice) as *mut [T]) } } } diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs index 8383d305518ab..c2c02de76389c 100644 --- a/src/libcore/cell.rs +++ b/src/libcore/cell.rs @@ -670,7 +670,7 @@ impl RefCell { pub fn into_inner(self) -> T { // Since this function takes `self` (the `RefCell`) by value, the // compiler statically verifies that it is not currently borrowed. - // Therefore the following assertion is just a `debug_assert!`. + // Therefore, the following assertion is just a `debug_assert!`. debug_assert!(self.borrow.get() == UNUSED); self.value.into_inner() } @@ -1308,10 +1308,10 @@ impl Drop for BorrowRefMut<'_> { impl<'b> BorrowRefMut<'b> { #[inline] fn new(borrow: &'b Cell) -> Option> { - // NOTE: Unlike BorrowRefMut::clone, new is called to create the initial + // NOTE: unlike `BorrowRefMut::clone`, new is called to create the initial // mutable reference, and so there must currently be no existing - // references. Thus, while clone increments the mutable refcount, here - // we explicitly only allow going from UNUSED to UNUSED - 1. + // references. Thus, while clone increments the mutable ref count, here + // we explicitly only allow going from `UNUSED` to `UNUSED - 1`. match borrow.get() { UNUSED => { borrow.set(UNUSED - 1); @@ -1321,7 +1321,7 @@ impl<'b> BorrowRefMut<'b> { } } - // Clone a `BorrowRefMut`. + // Clones a `BorrowRefMut`. // // This is only valid if each `BorrowRefMut` is used to track a mutable // reference to a distinct, nonoverlapping range of the original object. diff --git a/src/libcore/char/methods.rs b/src/libcore/char/methods.rs index 122e5f3affdc2..d5ff2260a619a 100644 --- a/src/libcore/char/methods.rs +++ b/src/libcore/char/methods.rs @@ -172,12 +172,12 @@ impl char { pub fn escape_unicode(self) -> EscapeUnicode { let c = self as u32; - // or-ing 1 ensures that for c==0 the code computes that one + // Or-ing 1 ensures that for `c == 0` the code computes that one // digit should be printed and (which is the same) avoids the - // (31 - 32) underflow + // `31 - 32` underflow. let msb = 31 - (c | 1).leading_zeros(); - // the index of the most significant hex digit + // The index of the most significant hex digit. let ms_hex_digit = msb / 4; EscapeUnicode { c: self, diff --git a/src/libcore/convert.rs b/src/libcore/convert.rs index b8d751cfbb6df..25a2d546d0e26 100644 --- a/src/libcore/convert.rs +++ b/src/libcore/convert.rs @@ -17,7 +17,7 @@ //! [`TryFrom`][`TryFrom`] rather than [`Into`][`Into`] or [`TryInto`][`TryInto`], //! as [`From`] and [`TryFrom`] provide greater flexibility and offer //! 
equivalent [`Into`] or [`TryInto`] implementations for free, thanks to a -//! blanket implementation in the standard library. However, there are some cases +//! blanket implementation in the standard library. However, there are some cases //! where this is not possible, such as creating conversions into a type defined //! outside your library, so implementing [`Into`] instead of [`From`] is //! sometimes necessary. @@ -408,7 +408,7 @@ impl AsRef for &mut T where T: AsRef } } -// FIXME (#45742): replace the above impls for &/&mut with the following more general one: +// FIXME(#45742): replace the above impls for `&`/`&mut` with the following more general one: // // As lifts over Deref // impl AsRef for D where D::Target: AsRef { // fn as_ref(&self) -> &U { @@ -416,7 +416,7 @@ impl AsRef for &mut T where T: AsRef // } // } -// AsMut lifts over &mut +// `AsMut` lifts over `&mut`. #[stable(feature = "rust1", since = "1.0.0")] impl AsMut for &mut T where T: AsMut { @@ -425,7 +425,7 @@ impl AsMut for &mut T where T: AsMut } } -// FIXME (#45742): replace the above impl for &mut with the following more general one: +// FIXME(#45742): replace the above impl for `&mut` with the following more general one: // // AsMut lifts over DerefMut // impl AsMut for D where D::Target: AsMut { // fn as_mut(&mut self) -> &mut U { @@ -433,7 +433,7 @@ impl AsMut for &mut T where T: AsMut // } // } -// From implies Into +// `From` implies `Into`. #[stable(feature = "rust1", since = "1.0.0")] impl Into for T where U: From { @@ -442,14 +442,14 @@ impl Into for T where U: From } } -// From (and thus Into) is reflexive +// `From` (and thus `Into`) is reflexive. #[stable(feature = "rust1", since = "1.0.0")] impl From for T { fn from(t: T) -> T { t } } -// TryFrom implies TryInto +// `TryFrom` implies `TryInto`. #[unstable(feature = "try_from", issue = "33417")] impl TryInto for T where U: TryFrom { diff --git a/src/libcore/fmt/float.rs b/src/libcore/fmt/float.rs index 20c626cef1b16..78541b3378846 100644 --- a/src/libcore/fmt/float.rs +++ b/src/libcore/fmt/float.rs @@ -13,7 +13,7 @@ fn float_to_decimal_common_exact(fmt: &mut Formatter, num: &T, let mut buf = MaybeUninit::<[u8; 1024]>::uninitialized(); // enough for f32 and f64 let mut parts = MaybeUninit::<[flt2dec::Part; 4]>::uninitialized(); // FIXME(#53491): Technically, this is calling `get_mut` on an uninitialized // `MaybeUninit` (here and elsewhere in this file). Revisit this once // we decided whether that is valid or not. let formatted = flt2dec::to_exact_fixed_str(flt2dec::strategy::grisu::format_exact, *num, sign, precision, diff --git a/src/libcore/fmt/mod.rs b/src/libcore/fmt/mod.rs index 7efb7f31298bf..3e3213596b129 100644 --- a/src/libcore/fmt/mod.rs +++ b/src/libcore/fmt/mod.rs @@ -237,7 +237,7 @@ pub struct Formatter<'a> { args: &'a [ArgumentV1<'a>], } -// NB. Argument is essentially an optimized partially applied formatting function, +// N.B., `Argument` is essentially an optimized partially applied formatting function, // equivalent to `exists T.(&T, fn(&T, &mut Formatter) -> Result`. struct Void { @@ -1695,8 +1695,7 @@ impl<'a> Formatter<'a> { self.flags & (1 << FlagV1::SignAwareZeroPad as u32) != 0 } - // FIXME: Decide what public API we want for these two flags. - // https://github.com/rust-lang/rust/issues/48584 + // FIXME: decide what public API we want for these two flags. See issue #48584.
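Aside: the `convert.rs` hunks above describe the blanket impls that make `From` the preferred trait to implement. A minimal sketch of what "implies" means in practice (`Celsius`/`Fahrenheit` are made-up illustration types):

```rust
#[derive(Debug, PartialEq)]
struct Celsius(f64);
#[derive(Debug, PartialEq)]
struct Fahrenheit(f64);

impl From<Celsius> for Fahrenheit {
    fn from(c: Celsius) -> Fahrenheit {
        Fahrenheit(c.0 * 9.0 / 5.0 + 32.0)
    }
}

fn main() {
    // `.into()` comes for free from the `impl<T, U: From<T>> Into<U> for T`
    // blanket impl; reflexivity likewise gives `Fahrenheit::from(f)` for free.
    let f: Fahrenheit = Celsius(100.0).into();
    assert_eq!(f, Fahrenheit(212.0));
}
```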
fn debug_lower_hex(&self) -> bool { self.flags & (1 << FlagV1::DebugLowerHex as u32) != 0 } fn debug_upper_hex(&self) -> bool { self.flags & (1 << FlagV1::DebugUpperHex as u32) != 0 } diff --git a/src/libcore/iter/sources.rs b/src/libcore/iter/sources.rs index 6f45f082d6a37..65a6e610efbc5 100644 --- a/src/libcore/iter/sources.rs +++ b/src/libcore/iter/sources.rs @@ -582,7 +582,7 @@ impl fmt::Debug for FromFn { pub fn successors(first: Option, succ: F) -> Successors where F: FnMut(&T) -> Option { - // If this function returned `impl Iterator` + // If this function returned `impl Iterator` // it could be based on `unfold` and not need a dedicated type. // However having a named `Successors` type allows it to be `Clone` when `T` and `F` are. Successors { diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs index 227fb22bc7d93..f2ebe1d3213f6 100644 --- a/src/libcore/lib.rs +++ b/src/libcore/lib.rs @@ -17,7 +17,7 @@ //! //! Please note that all of these details are currently not considered stable. //! -// FIXME: Fill me in with more detail when the interface settles +// FIXME: fill me in with more detail when the interface settles. //! This library is built on the assumption of a few existing symbols: //! //! * `memcpy`, `memcmp`, `memset` - These are core memory routines which are diff --git a/src/libcore/mem.rs b/src/libcore/mem.rs index 3f7455aeb59b9..06f94f1b00373 100644 --- a/src/libcore/mem.rs +++ b/src/libcore/mem.rs @@ -831,7 +831,7 @@ pub unsafe fn transmute_copy(src: &T) -> U { #[stable(feature = "discriminant_value", since = "1.21.0")] pub struct Discriminant(u64, PhantomData T>); -// N.B. These trait implementations cannot be derived because we don't want any bounds on T. +// N.B., these trait implementations cannot be derived because we don't want any bounds on `T`. #[stable(feature = "discriminant_value", since = "1.21.0")] impl Copy for Discriminant {} @@ -1053,7 +1053,7 @@ impl DerefMut for ManuallyDrop { /// This is exploited by the compiler for various optimizations, such as eliding /// run-time checks and optimizing `enum` layout. /// -/// Not initializing memory at all (instead of zero--initializing it) causes the same +/// Not initializing memory at all (instead of zero-initializing it) causes the same /// issue: after all, the initial value of the variable might just happen to be /// one that violates the invariant. /// @@ -1077,14 +1077,14 @@ impl DerefMut for ManuallyDrop { /// The compiler then knows to not optimize this code. #[allow(missing_debug_implementations)] #[unstable(feature = "maybe_uninit", issue = "53491")] -// NOTE after stabilizing `MaybeUninit` proceed to deprecate `mem::{uninitialized,zeroed}` +// NOTE: after stabilizing `MaybeUninit`, proceed to deprecate `mem::{uninitialized,zeroed}`. pub union MaybeUninit { uninit: (), value: ManuallyDrop, } impl MaybeUninit { - /// Create a new `MaybeUninit` initialized with the given value. + /// Creates a new `MaybeUninit` initialized with the given value. /// /// Note that dropping a `MaybeUninit` will never call `T`'s drop code. /// It is your responsibility to make sure `T` gets dropped if it got initialized. @@ -1176,7 +1176,7 @@ impl MaybeUninit { /// It is up to the caller to guarantee that the `MaybeUninit` really is in an initialized /// state, otherwise this will immediately cause undefined behavior. // FIXME(#53491): We currently rely on the above being incorrect, i.e., we have references - // to uninitialized data (e.g., in `libcore/fmt/float.rs`). 
We should make + // to uninitialized data (e.g., in `libcore/fmt/float.rs`). We should make // a final decision about the rules before stabilization. #[unstable(feature = "maybe_uninit", issue = "53491")] #[inline(always)] @@ -1192,7 +1192,7 @@ impl MaybeUninit { unsafe { &*self.value as *const T } } - /// Get sa mutable pointer to the contained value. Reading from this pointer or turning it + /// Gets a mutable pointer to the contained value. Reading from this pointer or turning it /// into a reference will be undefined behavior unless the `MaybeUninit` is initialized. #[unstable(feature = "maybe_uninit", issue = "53491")] #[inline(always)] diff --git a/src/libcore/num/bignum.rs b/src/libcore/num/bignum.rs index c3a42a0fc0494..6889d82aa4a51 100644 --- a/src/libcore/num/bignum.rs +++ b/src/libcore/num/bignum.rs @@ -47,7 +47,8 @@ macro_rules! impl_full_ops { $( impl FullOps for $ty { fn full_add(self, other: $ty, carry: bool) -> (bool, $ty) { - // This cannot overflow; the output is between `0` and `2 * 2^nbits - 1`. + // This cannot overflow; + // the output is between `0` and `2 * 2^nbits - 1`. // FIXME: will LLVM optimize this into ADC or similar? let (v, carry1) = intrinsics::add_with_overflow(self, other); let (v, carry2) = intrinsics::add_with_overflow(v, if carry {1} else {0}); @@ -74,7 +75,8 @@ macro_rules! impl_full_ops { fn full_div_rem(self, other: $ty, borrow: $ty) -> ($ty, $ty) { debug_assert!(borrow < other); - // This cannot overflow; the output is between `0` and `other * (2^nbits - 1)`. + // This cannot overflow; + // the output is between `0` and `other * (2^nbits - 1)`. let nbits = mem::size_of::<$ty>() * 8; let lhs = ((borrow as $bigty) << nbits) | (self as $bigty); let rhs = other as $bigty; @@ -396,7 +398,7 @@ macro_rules! define_bignum { // Stupid slow base-2 long division taken from // https://en.wikipedia.org/wiki/Division_algorithm - // FIXME use a greater base ($ty) for the long division. + // FIXME: use a greater base ($ty) for the long division. assert!(!d.is_zero()); let digitbits = mem::size_of::<$ty>() * 8; for digit in &mut q.base[..] { diff --git a/src/libcore/num/dec2flt/algorithm.rs b/src/libcore/num/dec2flt/algorithm.rs index 3b57bb7544b35..da7491e027542 100644 --- a/src/libcore/num/dec2flt/algorithm.rs +++ b/src/libcore/num/dec2flt/algorithm.rs @@ -284,7 +284,7 @@ pub fn algorithm_m(f: &Big, e: i16) -> T { v = Big::from_small(1); v.mul_pow5(e_abs).mul_pow2(e_abs); } else { - // FIXME possible optimization: generalize big_to_fp so that we can do the equivalent of + // FIXME: possible optimization: generalize big_to_fp so that we can do the equivalent of // fp_to_float(big_to_fp(u)) here, only without the double rounding. u = f.clone(); u.mul_pow5(e_abs).mul_pow2(e_abs); @@ -301,7 +301,7 @@ pub fn algorithm_m(f: &Big, e: i16) -> T { // We have to stop at the minimum exponent, if we wait until `k < T::MIN_EXP_INT`, // then we'd be off by a factor of two. Unfortunately this means we have to special- // case normal numbers with the minimum exponent. - // FIXME find a more elegant formulation, but run the `tiny-pow10` test to make sure + // FIXME: find a more elegant formulation, but run the `tiny-pow10` test to make sure // that it's actually correct! if x >= min_sig && x <= max_sig { break; @@ -330,11 +330,11 @@ pub fn algorithm_m(f: &Big, e: i16) -> T { fn quick_start(u: &mut Big, v: &mut Big, k: &mut i16) { // The bit length is an estimate of the base two logarithm, and log(u / v) = log(u) - log(v). 
// The estimate is off by at most 1, but always an under-estimate, so the error on log(u) - // and log(v) are of the same sign and cancel out (if both are large). Therefore the error + // and log(v) are of the same sign and cancel out (if both are large). Therefore, the error // for log(u / v) is at most one as well. // The target ratio is one where u/v is in an in-range significand. Thus our termination // condition is log2(u / v) being the significand bits, plus/minus one. - // FIXME Looking at the second bit could improve the estimate and avoid some more divisions. + // FIXME: looking at the second bit could improve the estimate and avoid some more divisions. let target_ratio = T::SIG_BITS as i16; let log2_u = u.bit_length() as i16; let log2_v = v.bit_length() as i16; @@ -380,7 +380,7 @@ fn underflow(x: Big, v: Big, rem: Big) -> T { // \-----/\-------/ \------------/ // q trunc. (represented by rem) // - // Therefore, when the rounded-off bits are != 0.5 ULP, they decide the rounding + // Therefore, when the rounded-off bits are not equal to half ULP, they decide the rounding // on their own. When they are equal and the remainder is non-zero, the value still // needs to be rounded up. Only when the rounded off bits are 1/2 and the remainder // is zero, we have a half-to-even situation. diff --git a/src/libcore/num/dec2flt/mod.rs b/src/libcore/num/dec2flt/mod.rs index 47ea5aa5ff000..f6e60ed2fd863 100644 --- a/src/libcore/num/dec2flt/mod.rs +++ b/src/libcore/num/dec2flt/mod.rs @@ -264,7 +264,7 @@ fn convert(mut decimal: Decimal) -> Result { // Now the exponent certainly fits in 16 bit, which is used throughout the main algorithms. let e = e as i16; - // FIXME These bounds are rather conservative. A more careful analysis of the failure modes + // FIXME: these bounds are rather conservative. A more careful analysis of the failure modes // of Bellerophon could allow using it in more cases for a massive speed up. let exponent_in_range = table::MIN_E <= e && e <= table::MAX_E; let value_in_range = upper_bound <= T::MAX_NORMAL_DIGITS as u64; diff --git a/src/libcore/num/dec2flt/num.rs b/src/libcore/num/dec2flt/num.rs index 126713185711b..66d3c3c2f50d1 100644 --- a/src/libcore/num/dec2flt/num.rs +++ b/src/libcore/num/dec2flt/num.rs @@ -1,12 +1,12 @@ //! Utility functions for bignums that don't make too much sense to turn into methods. -// FIXME This module's name is a bit unfortunate, since other modules also import `core::num`. +// FIXME: this module's name is a bit unfortunate, since other modules also import `core::num`. use cmp::Ordering::{self, Less, Equal, Greater}; pub use num::bignum::Big32x40 as Big; -/// Test whether truncating all bits less significant than `ones_place` introduces +/// Tests whether truncating all bits less significant than `ones_place` introduces /// a relative error less, equal, or greater than 0.5 ULP. pub fn compare_with_half_ulp(f: &Big, ones_place: usize) -> Ordering { if ones_place == 0 { diff --git a/src/libcore/num/f32.rs b/src/libcore/num/f32.rs index dc0580764acb7..b93c5c7f7fb8f 100644 --- a/src/libcore/num/f32.rs +++ b/src/libcore/num/f32.rs @@ -378,7 +378,7 @@ impl f32 { // When either x or y is a signalingNaN, then the result is according to 6.2. // // Since we do not support sNaN in Rust yet, we do not need to handle them. - // FIXME(nagisa): due to https://bugs.llvm.org/show_bug.cgi?id=33303 we canonicalize by + // FIXME(nagisa): due to <https://bugs.llvm.org/show_bug.cgi?id=33303>, we canonicalize by // multiplying by 1.0. Should switch to the `canonicalize` when it works.
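Aside: the observable contract described in the `f32::max` comment above, sketched as a runnable check (the `* 1.0` canonicalization is not visible at this level; it only matters for signaling-NaN bit patterns):

```rust
fn main() {
    let nan = std::f32::NAN;
    assert_eq!(2.0f32.max(nan), 2.0); // a NaN operand is ignored...
    assert_eq!(nan.max(2.0), 2.0);
    assert!(nan.max(nan).is_nan());   // ...unless both operands are NaN.
}
```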
(if self.is_nan() || self < other { other } else { self }) * 1.0 } @@ -402,7 +402,7 @@ impl f32 { // When either x or y is a signalingNaN, then the result is according to 6.2. // // Since we do not support sNaN in Rust yet, we do not need to handle them. - // FIXME(nagisa): due to https://bugs.llvm.org/show_bug.cgi?id=33303 we canonicalize by + // FIXME(nagisa): due to <https://bugs.llvm.org/show_bug.cgi?id=33303>, we canonicalize by // multiplying by 1.0. Should switch to the `canonicalize` when it works. (if other.is_nan() || self < other { self } else { other }) * 1.0 } diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index 6fb67ea9c9acb..1f13b3a768752 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -3582,7 +3582,7 @@ assert_eq!(7", stringify!($SelfT), ".rem_euclid(4), 3); // or any other integer } doc_comment! { - concat!("Returns `true` if and only if `self == 2^k` for some `k`. + concat!("Returns `true` if `self == 2^k` for some `k`. # Examples @@ -4774,7 +4774,7 @@ fn from_str_radix(src: &str, radix: u32) -> Result T::min_value(); // all valid digits are ascii, so we will just iterate over the utf8 bytes - // and cast them to chars. .to_digit() will safely return None for anything + // and cast them to chars. `.to_digit()` will safely return `None` for anything // other than a valid ascii digit for the given radix, including the first-byte // of multi-byte sequences let src = src.as_bytes(); diff --git a/src/libcore/num/wrapping.rs b/src/libcore/num/wrapping.rs index 50e189c9e3640..4174ba308364c 100644 --- a/src/libcore/num/wrapping.rs +++ b/src/libcore/num/wrapping.rs @@ -95,7 +95,7 @@ macro_rules! sh_impl_unsigned { ) } -// FIXME (#23545): uncomment the remaining impls +// FIXME(#23545): uncomment the remaining impls macro_rules! sh_impl_all { ($($t:ident)*) => ($( //sh_impl_unsigned! { $t, u8 } @@ -832,7 +832,7 @@ assert_eq!(n.leading_zeros(), 2); } doc_comment! { - concat!("Returns `true` if and only if `self == 2^k` for some `k`. + concat!("Returns `true` if `self == 2^k` for some `k`. # Examples diff --git a/src/libcore/option.rs b/src/libcore/option.rs index 76ef36ac30962..b84fc17f4b52b 100644 --- a/src/libcore/option.rs +++ b/src/libcore/option.rs @@ -41,7 +41,7 @@ //! ``` //! // -// FIXME: Show how `Option` is used in practice, with lots of methods +// FIXME: show how `Option` is used in practice, with lots of methods // //! # Options and pointers ("nullable" pointers) //! diff --git a/src/libcore/panic.rs b/src/libcore/panic.rs index 1abc0a18a9cc9..99186ef397dd3 100644 --- a/src/libcore/panic.rs +++ b/src/libcore/panic.rs @@ -116,8 +116,8 @@ impl<'a> PanicInfo<'a> { /// ``` #[stable(feature = "panic_hooks", since = "1.10.0")] pub fn location(&self) -> Option<&Location> { - // NOTE: If this is changed to sometimes return None, - // deal with that case in std::panicking::default_hook and std::panicking::begin_panic_fmt. + // N.B., if this is changed to sometimes return `None`, deal with that case in + // `std::panicking::default_hook` and `std::panicking::begin_panic_fmt`. Some(&self.location) } } @@ -131,9 +131,9 @@ impl fmt::Display for PanicInfo<'_> { } else if let Some(payload) = self.payload.downcast_ref::<&'static str>() { write!(formatter, "'{}', ", payload)? } - // NOTE: we cannot use downcast_ref::() here - // since String is not available in libcore! - // The payload is a String when `std::panic!` is called with multiple arguments, + // NOTE: we cannot use `downcast_ref::()` here + // since `String` is not available in libcore!
+ // The payload is a `String` when `std::panic!` is called with multiple arguments, // but in that case the message is also available. self.location.fmt(formatter) diff --git a/src/libcore/panicking.rs b/src/libcore/panicking.rs index d9cdb2a2b8a9f..eefe145a2fc13 100644 --- a/src/libcore/panicking.rs +++ b/src/libcore/panicking.rs @@ -70,8 +70,9 @@ pub fn panic_fmt(fmt: fmt::Arguments, file_line_col: &(&'static str, u32, u32)) unsafe { super::intrinsics::abort() } } - // NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call - #[allow(improper_ctypes)] // PanicInfo contains a trait object which is not FFI safe + // NOTE: this function never crosses the FFI boundary; it's a Rust-to-Rust call. + // `PanicInfo` contains a trait object which is not FFI safe. + #[allow(improper_ctypes)] extern "Rust" { #[lang = "panic_impl"] fn panic_impl(pi: &PanicInfo) -> !; diff --git a/src/libcore/pin.rs b/src/libcore/pin.rs index ee9098d73ee92..c2d10b3f13f38 100644 --- a/src/libcore/pin.rs +++ b/src/libcore/pin.rs @@ -366,8 +366,8 @@ impl fmt::Pointer for Pin

{ } // Note: this means that any impl of `CoerceUnsized` that allows coercing from -// a type that impls `Deref` to a type that impls -// `Deref` is unsound. Any such impl would probably be unsound +// a type that impls `Deref` to a type that impls +// `Deref` is unsound. Any such impl would probably be unsound // for other reasons, though, so we just need to take care not to allow such // impls to land in std. #[stable(feature = "pin", since = "1.33.0")] diff --git a/src/libcore/ptr.rs b/src/libcore/ptr.rs index 866c8d0896b3c..7bc125ed3269e 100644 --- a/src/libcore/ptr.rs +++ b/src/libcore/ptr.rs @@ -185,7 +185,7 @@ pub unsafe fn drop_in_place(to_drop: *mut T) { // The real `drop_in_place` -- the one that gets called implicitly when variables go // out of scope -- should have a safe reference and not a raw pointer as argument -// type. When we drop a local variable, we access it with a pointer that behaves +// type. When we drop a local variable, we access it with a pointer that behaves // like a safe reference; transmuting that to a raw pointer does not mean we can // actually access it with raw pointers. #[lang = "drop_in_place"] @@ -366,13 +366,13 @@ pub(crate) unsafe fn swap_nonoverlapping_one(x: *mut T, y: *mut T) { #[inline] unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) { - // The approach here is to utilize simd to swap x & y efficiently. Testing reveals + // The approach here is to utilize simd to swap `x` and `y` efficiently. Testing reveals // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel // Haswell E processors. LLVM is more able to optimize if we give a struct a - // #[repr(simd)], even if we don't actually use this struct directly. + // `#[repr(simd)]`, even if we don't actually use this struct directly. // - // FIXME repr(simd) broken on emscripten and redox - // It's also broken on big-endian powerpc64 and s390x. #42778 + // FIXME: `repr(simd)` broken on emscripten and redox. + // It's also broken on big-endian powerpc64 and s390x (see issue #42778). #[cfg_attr(not(any(target_os = "emscripten", target_os = "redox", target_endian = "big")), repr(simd))] @@ -381,20 +381,20 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) { let block_size = mem::size_of::(); - // Loop through x & y, copying them `Block` at a time + // Loop through `x` and `y`, copying them `Block` at a time. // The optimizer should unroll the loop fully for most types - // N.B. We can't use a for loop as the `range` impl calls `mem::swap` recursively + // N.B., we can't use a for loop as the `range` impl calls `mem::swap` recursively. let mut i = 0; while i + block_size <= len { // Create some uninitialized memory as scratch space - // Declaring `t` here avoids aligning the stack when this loop is unused + // Declaring `t` here avoids aligning the stack when this loop is unused. let mut t = mem::MaybeUninit::::uninitialized(); let t = t.as_mut_ptr() as *mut u8; let x = x.add(i); let y = y.add(i); - // Swap a block of bytes of x & y, using t as a temporary buffer - // This should be optimized into efficient SIMD operations where available + // Swap a block of bytes of `x` and `y`, using `t` as a temporary buffer. + // This should be optimized into efficient SIMD operations where available.
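Aside: the three `copy_nonoverlapping` calls that follow perform the classic three-move swap through scratch memory. A safe sketch of the same move for one fixed-size block (hypothetical helper, not the library code):

```rust
// Swap two 32-byte blocks through a scratch copy: x -> t, y -> x, t -> y.
fn swap_blocks(x: &mut [u8; 32], y: &mut [u8; 32]) {
    let t = *x; // scratch copy ([u8; 32] is `Copy`)
    *x = *y;
    *y = t;
}

fn main() {
    let mut a = [1u8; 32];
    let mut b = [2u8; 32];
    swap_blocks(&mut a, &mut b);
    assert_eq!(a, [2u8; 32]);
    assert_eq!(b, [1u8; 32]);
}
```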
copy_nonoverlapping(x, t, block_size); copy_nonoverlapping(y, x, block_size); copy_nonoverlapping(t, y, block_size); @@ -402,7 +402,7 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) { } if i < len { - // Swap any remaining bytes + // Swap any remaining bytes. let mut t = mem::MaybeUninit::::uninitialized(); let rem = len - i; diff --git a/src/libcore/slice/mod.rs b/src/libcore/slice/mod.rs index acca9748372ca..ae7a4310c8869 100644 --- a/src/libcore/slice/mod.rs +++ b/src/libcore/slice/mod.rs @@ -451,14 +451,14 @@ impl [T] { let ln = self.len(); // For very small types, all the individual reads in the normal - // path perform poorly. We can do better, given efficient unaligned + // path perform poorly. We can do better, given efficient unaligned // load/store, by loading a larger chunk and reversing a register. // Ideally LLVM would do this for us, as it knows better than we do // whether unaligned reads are efficient (since that changes between // different ARM versions, for example) and what the best chunk size - // would be. Unfortunately, as of LLVM 4.0 (2017-05) it only unrolls - // the loop, so we need to do this ourselves. (Hypothesis: reverse + // would be. Unfortunately, as of LLVM 4.0 (2017-05) it only unrolls + // the loop, so we need to do this ourselves. (Hypothesis: reverse // is troublesome because the sides can be aligned differently -- // will be, when the length is odd -- so there's no way of emitting // pre- and postludes to use fully-aligned SIMD in the middle.) @@ -1647,7 +1647,7 @@ impl [T] { // over all the elements, swapping as we go so that at the end // the elements we wish to keep are in the front, and those we // wish to reject are at the back. We can then split the slice. - // This operation is still O(n). + // This operation is still `O(n)`. // // Example: We start in this state, where `r` represents "next // read" and `w` represents "next_write`. @@ -1658,9 +1658,9 @@ impl [T] { // +---+---+---+---+---+---+ // w // - // Comparing self[r] against self[w-1], this is not a duplicate, so - // we swap self[r] and self[w] (no effect as r==w) and then increment both - // r and w, leaving us with: + // Comparing `self[r]` against `self[w - 1]`, this is not a duplicate, so + // we swap `self[r]` and `self[w]` (no effect as `r == w`) and then increment both + // `r` and `w`, leaving us with: // // r // +---+---+---+---+---+---+ @@ -1668,7 +1668,7 @@ impl [T] { // +---+---+---+---+---+---+ // w // - // Comparing self[r] against self[w-1], this value is a duplicate, + // Comparing `self[r]` against `self[w - 1]`, this value is a duplicate, // so we increment `r` but leave everything else unchanged: // // r @@ -1677,8 +1677,8 @@ impl [T] { // +---+---+---+---+---+---+ // w // - // Comparing self[r] against self[w-1], this is not a duplicate, - // so swap self[r] and self[w] and advance r and w: + // Comparing `self[r]` against `self[w - 1]`, this is not a duplicate, + // so swap `self[r]` and `self[w]` and advance `r` and `w`: // // r // +---+---+---+---+---+---+ @@ -1694,7 +1694,7 @@ impl [T] { // +---+---+---+---+---+---+ // w // - // Duplicate, advance r. End of slice. Split at w. + // Duplicate, advance `r`. End of slice. Split at `w`. 
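Aside: a safe, index-based sketch of the `r`/`w` cursor scheme the diagrams above walk through. The real method splits the slice at `w` and uses raw pointers; this toy version truncates instead, which is enough to show the invariant (everything before `w` is deduplicated):

```rust
fn dedup_in_place(v: &mut Vec<u32>) {
    let len = v.len();
    if len <= 1 {
        return;
    }
    let mut w = 1; // next write
    for r in 1..len { // next read
        if v[r] != v[w - 1] {
            // Not a duplicate: swap it into the keep region and grow it.
            v.swap(r, w);
            w += 1;
        }
        // Otherwise a duplicate: advance `r` only, leaving `w` in place.
    }
    v.truncate(w); // the real code splits at `w` instead of truncating
}

fn main() {
    let mut v = vec![0, 0, 1, 1, 1, 2];
    dedup_in_place(&mut v);
    assert_eq!(v, [0, 1, 2]);
}
```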
let len = self.len(); if len <= 1 { @@ -1894,7 +1894,7 @@ impl [T] { pub fn clone_from_slice(&mut self, src: &[T]) where T: Clone { assert!(self.len() == src.len(), "destination and source slices have different lengths"); - // NOTE: We need to explicitly slice them to the same length + // NOTE: we need to explicitly slice them to the same length // for bounds checking to be elided, and the optimizer will // generate memcpy for simple cases (for example T = u8). let len = self.len(); @@ -2106,8 +2106,8 @@ impl [T] { #[inline] fn gcd(a: usize, b: usize) -> usize { // iterative stein’s algorithm - // We should still make this `const fn` (and revert to recursive algorithm if we do) - // because relying on llvm to consteval all this is… well, it makes me uncomfortable. + // We should still make this `const fn` (and revert to recursive algorithm if we do), + // because relying on LLVM to const-eval all this makes me uncomfortable. let (ctz_a, mut ctz_b) = unsafe { if a == 0 { return b; } if b == 0 { return a; } @@ -2176,19 +2176,19 @@ impl [T] { pub unsafe fn align_to(&self) -> (&[T], &[U], &[T]) { // Note that most of this function will be constant-evaluated, if ::mem::size_of::() == 0 || ::mem::size_of::() == 0 { - // handle ZSTs specially, which is – don't handle them at all. + // Handle ZSTs specially, which is -- don't handle them at all. return (self, &[], &[]); } - // First, find at what point do we split between the first and 2nd slice. Easy with - // ptr.align_offset. + // First, find at what point we split between the first and 2nd slice -- easy with + // `ptr.align_offset`. let ptr = self.as_ptr(); let offset = ::ptr::align_offset(ptr, ::mem::align_of::()); if offset > self.len() { (self, &[], &[]) } else { let (left, rest) = self.split_at(offset); - // now `rest` is definitely aligned, so `from_raw_parts_mut` below is okay + // Now `rest` is definitely aligned, so `from_raw_parts_mut` below is ok. let (us_len, ts_len) = rest.align_to_offsets::(); (left, from_raw_parts(rest.as_ptr() as *const U, us_len), @@ -2241,7 +2241,7 @@ impl [T] { (self, &mut [], &mut []) } else { let (left, rest) = self.split_at_mut(offset); - // now `rest` is definitely aligned, so `from_raw_parts_mut` below is okay + // Now `rest` is definitely aligned, so `from_raw_parts_mut` below is ok. let (us_len, ts_len) = rest.align_to_offsets::(); let mut_ptr = rest.as_mut_ptr(); (left, @@ -2873,7 +2873,7 @@ macro_rules! iterator { #[inline(always)] unsafe fn post_inc_start(&mut self, offset: isize) -> * $raw_mut T { if mem::size_of::() == 0 { - // This is *reducing* the length. `ptr` never changes with ZST. + // This is *reducing* the length. `ptr` never changes with ZST. self.end = (self.end as * $raw_mut u8).wrapping_offset(-offset) as * $raw_mut T; self.ptr } else { @@ -3124,9 +3124,10 @@ macro_rules! iterator { #[stable(feature = "rust1", since = "1.0.0")] pub struct Iter<'a, T: 'a> { ptr: *const T, - end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that - // ptr == end is a quick test for the Iterator being empty, that works - // for both ZST and non-ZST. + // If `T` is a ZST, this is actually `ptr + len`. This encoding is picked so that + // `ptr == end` is a quick test for the `Iterator` being empty, which works + // for both ZST and non-ZST.
+ end: *const T, _marker: marker::PhantomData<&'a T>, } @@ -3226,9 +3227,10 @@ impl AsRef<[T]> for Iter<'_, T> { #[stable(feature = "rust1", since = "1.0.0")] pub struct IterMut<'a, T: 'a> { ptr: *mut T, - end: *mut T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that - // ptr == end is a quick test for the Iterator being empty, that works - // for both ZST and non-ZST. + // If `T` is a ZST, this is actually `ptr + len`. This encoding is picked so that + // `ptr == end` is a quick test for the `Iterator` being empty, which works + // for both ZST and non-ZST. + end: *mut T, _marker: marker::PhantomData<&'a mut T>, } @@ -3321,7 +3323,7 @@ impl fmt::Debug for Split<'_, T, P> where P: FnMut(&T) -> bool } } -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +// FIXME(#26925): remove in favor of `#[derive(Clone)]`. #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Split<'_, T, P> where P: Clone + FnMut(&T) -> bool { fn clone(&self) -> Self { @@ -3432,7 +3434,8 @@ impl<'a, T, P> Iterator for SplitMut<'a, T, P> where P: FnMut(&T) -> bool { fn next(&mut self) -> Option<&'a mut [T]> { if self.finished { return None; } - let idx_opt = { // work around borrowck limitations + // Work around borrowck limitations. + let idx_opt = { let pred = &mut self.pred; self.v.iter().position(|x| (*pred)(x)) }; @@ -3452,8 +3455,8 @@ impl<'a, T, P> Iterator for SplitMut<'a, T, P> where P: FnMut(&T) -> bool { if self.finished { (0, Some(0)) } else { - // if the predicate doesn't match anything, we yield one slice - // if it matches every element, we yield len+1 empty slices. + // If the predicate doesn't match anything, we yield one slice; + // if it matches every element, we yield `len + 1` empty slices. (1, Some(self.v.len() + 1)) } } @@ -3758,7 +3761,7 @@ pub struct Windows<'a, T:'a> { size: usize } -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +// FIXME(#26925): remove in favor of `#[derive(Clone)]`. #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Windows<'_, T> { fn clone(&self) -> Self { @@ -3871,7 +3874,7 @@ pub struct Chunks<'a, T:'a> { chunk_size: usize } -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +// FIXME(#26925): remove in favor of `#[derive(Clone)]`. #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Chunks<'_, T> { fn clone(&self) -> Self { @@ -4132,7 +4135,7 @@ impl<'a, T> ChunksExact<'a, T> { } } -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +// FIXME(#26925): remove in favor of `#[derive(Clone)]`. #[stable(feature = "chunks_exact", since = "1.31.0")] impl Clone for ChunksExact<'_, T> { fn clone(&self) -> Self { @@ -4359,7 +4362,7 @@ pub struct RChunks<'a, T:'a> { chunk_size: usize } -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +// FIXME(#26925): remove in favor of `#[derive(Clone)]`. #[stable(feature = "rchunks", since = "1.31.0")] impl<'a, T> Clone for RChunks<'a, T> { fn clone(&self) -> RChunks<'a, T> { @@ -4628,7 +4631,7 @@ impl<'a, T> RChunksExact<'a, T> { } } -// FIXME(#26925) Remove in favor of `#[derive(Clone)]` +// FIXME(#26925): remove in favor of `#[derive(Clone)]`.
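Aside on the repeated FIXME(#26925): `#[derive(Clone)]` would add a `T: Clone` bound these iterator types do not need, which is why each carries a manual impl. A hypothetical illustration:

```rust
// Stand-in for types like `Windows<'a, T>` that only hold a shared slice.
struct SliceWrapper<'a, T> {
    v: &'a [T],
}

// A derived impl would require `T: Clone` even though only the reference
// is copied; the manual impl works for any `T`.
impl<'a, T> Clone for SliceWrapper<'a, T> {
    fn clone(&self) -> Self {
        SliceWrapper { v: self.v }
    }
}

fn main() {
    struct NotClone; // deliberately not `Clone`
    let data = [NotClone, NotClone];
    let a = SliceWrapper { v: &data };
    let _b = a.clone(); // fine: no `T: Clone` bound is involved
}
```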
#[stable(feature = "rchunks", since = "1.31.0")] impl<'a, T> Clone for RChunksExact<'a, T> { fn clone(&self) -> RChunksExact<'a, T> { diff --git a/src/libcore/slice/sort.rs b/src/libcore/slice/sort.rs index 3f84faa049939..543365bde6639 100644 --- a/src/libcore/slice/sort.rs +++ b/src/libcore/slice/sort.rs @@ -225,7 +225,7 @@ fn partition_in_blocks(v: &mut [T], pivot: &T, is_less: &mut F) -> usize let mut end_r = ptr::null_mut(); let mut offsets_r: [MaybeUninit; BLOCK] = uninitialized_array![u8; BLOCK]; - // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather + // FIXME: when we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient. // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive). diff --git a/src/libcore/str/mod.rs b/src/libcore/str/mod.rs index 76a45e53684bd..812e28077eb9e 100644 --- a/src/libcore/str/mod.rs +++ b/src/libcore/str/mod.rs @@ -502,21 +502,21 @@ pub fn next_code_point<'a, I: Iterator>(bytes: &mut I) -> Option< return Some(x as u32) } - // Multibyte case follows - // Decode from a byte combination out of: [[[x y] z] w] - // NOTE: Performance is sensitive to the exact formulation here + // Multibyte case follows. + // Decode from a byte combination out of: `[[[x y] z] w]`. + // NOTE: performance is sensitive to the exact formulation here let init = utf8_first_byte(x, 2); let y = unwrap_or_0(bytes.next()); let mut ch = utf8_acc_cont_byte(init, y); if x >= 0xE0 { - // [[x y z] w] case - // 5th bit in 0xE0 .. 0xEF is always clear, so `init` is still valid + // `[[x y z] w]` case. + // 5th bit in `0xE0 .. 0xEF` is always clear, so `init` is still valid. let z = unwrap_or_0(bytes.next()); let y_z = utf8_acc_cont_byte((y & CONT_MASK) as u32, z); ch = init << 12 | y_z; if x >= 0xF0 { - // [x y z w] case - // use only the lower 3 bits of `init` + // `[x y z w]` case. + // Use only the lower 3 bits of `init`. let w = unwrap_or_0(bytes.next()); ch = (init & 7) << 18 | utf8_acc_cont_byte(y_z, w); } @@ -531,14 +531,14 @@ pub fn next_code_point<'a, I: Iterator>(bytes: &mut I) -> Option< fn next_code_point_reverse<'a, I>(bytes: &mut I) -> Option where I: DoubleEndedIterator, { - // Decode UTF-8 + // Decode UTF-8. let w = match *bytes.next_back()? { next_byte if next_byte < 128 => return Some(next_byte as u32), back_byte => back_byte, }; - // Multibyte case follows - // Decode from a byte combination out of: [x [y [z w]]] + // Multibyte case follows. + // Decode from a byte combination out of: `[x [y [z w]]]`. let mut ch; let z = unwrap_or_0(bytes.next_back()); ch = utf8_first_byte(z, 2); @@ -564,7 +564,7 @@ impl<'a> Iterator for Chars<'a> { #[inline] fn next(&mut self) -> Option { next_code_point(&mut self.iter).map(|ch| { - // str invariant says `ch` is a valid Unicode Scalar Value + // `str` invariant says `ch` is a valid Unicode Scalar Value. unsafe { char::from_u32_unchecked(ch) } @@ -573,7 +573,7 @@ impl<'a> Iterator for Chars<'a> { #[inline] fn count(self) -> usize { - // length in `char` is equal to the number of non-continuation bytes + // Length in `char`s is equal to the number of non-continuation bytes. 
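Aside: the "non-continuation bytes" trick in the `count` comment above, checked in miniature (UTF-8 continuation bytes always have the form `0b10xx_xxxx`):

```rust
fn main() {
    let s = "héllo"; // 'é' takes two bytes in UTF-8
    let non_continuation = s.bytes().filter(|&b| (b & 0xC0) != 0x80).count();
    assert_eq!(non_continuation, s.chars().count()); // both are 5
}
```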
let bytes_len = self.iter.len(); let mut cont_bytes = 0; for &byte in self.iter { @@ -603,7 +603,7 @@ impl<'a> DoubleEndedIterator for Chars<'a> { #[inline] fn next_back(&mut self) -> Option { next_code_point_reverse(&mut self.iter).map(|ch| { - // str invariant says `ch` is a valid Unicode Scalar Value + // `str` invariant says `ch` is a valid Unicode Scalar Value. unsafe { char::from_u32_unchecked(ch) } diff --git a/src/libcore/str/pattern.rs b/src/libcore/str/pattern.rs index 2571780ad0bab..befa692e94459 100644 --- a/src/libcore/str/pattern.rs +++ b/src/libcore/str/pattern.rs @@ -313,7 +313,7 @@ unsafe impl<'a> Searcher<'a> for CharSearcher<'a> { // ꁁ (U+A041 YI SYLLABLE PA), utf-8 `EA 81 81` will have us always find // the second byte when searching for the third. // - // However, this is totally okay. While we have the invariant that + // However, this is totally ok. While we have the invariant that // self.finger is on a UTF8 boundary, this invariant is not relied upon // within this method (it is relied upon in CharSearcher::next()). // @@ -625,7 +625,7 @@ macro_rules! searcher_methods { // Impl for &[char] ///////////////////////////////////////////////////////////////////////////// -// Todo: Change / Remove due to ambiguity in meaning. +// FIXME: change/remove due to ambiguity in meaning. /// Associated type for `<&[char] as Pattern<'a>>::Searcher`. #[derive(Clone, Debug)] diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs index 8c5dde7dc271b..2e573f360b65a 100644 --- a/src/libcore/sync/atomic.rs +++ b/src/libcore/sync/atomic.rs @@ -2403,7 +2403,7 @@ pub fn fence(order: Ordering) { /// of memory re-ordering the compiler is allowed to do. Specifically, depending on /// the given [`Ordering`] semantics, the compiler may be disallowed from moving reads /// or writes from before or after the call to the other side of the call to -/// `compiler_fence`. Note that it does **not** prevent the *hardware* +/// `compiler_fence`. Note that it does *not* prevent the *hardware* /// from doing such re-ordering. This is not a problem in a single-threaded, /// execution context, but when other threads may modify memory at the same /// time, stronger synchronization primitives such as [`fence`] are required. diff --git a/src/libcore/tests/hash/mod.rs b/src/libcore/tests/hash/mod.rs index bf3039a7e51e8..45c9ad08b4c87 100644 --- a/src/libcore/tests/hash/mod.rs +++ b/src/libcore/tests/hash/mod.rs @@ -113,8 +113,8 @@ fn test_custom_state() { assert_eq!(hash(&Custom { hash: 5 }), 5); } -// FIXME: Instantiated functions with i128 in the signature is not supported in Emscripten. -// See https://github.com/kripken/emscripten-fastcomp/issues/169 +// FIXME: instantiated functions with `i128` in the signature are not supported in Emscripten. +// See <https://github.com/kripken/emscripten-fastcomp/issues/169>. #[cfg(not(target_os = "emscripten"))] #[test] fn test_indirect_hasher() { diff --git a/src/libcore/tests/num/dec2flt/mod.rs b/src/libcore/tests/num/dec2flt/mod.rs index 8f1cd32c3563c..2b582cf8a7fea 100644 --- a/src/libcore/tests/num/dec2flt/mod.rs +++ b/src/libcore/tests/num/dec2flt/mod.rs @@ -23,7 +23,7 @@ macro_rules!
test_literal { }) } -#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630 +#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue #42630 #[test] fn ordinary() { test_literal!(1.0); @@ -34,7 +34,7 @@ fn ordinary() { test_literal!(2.2250738585072014e-308); } -#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630 +#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue #42630 #[test] fn special_code_paths() { test_literal!(36893488147419103229.0); // 2^65 - 3, triggers half-to-even with even significand diff --git a/src/libcore/tests/num/dec2flt/rawfp.rs b/src/libcore/tests/num/dec2flt/rawfp.rs index 747c1bfa3f9c2..002f8a22a41b4 100644 --- a/src/libcore/tests/num/dec2flt/rawfp.rs +++ b/src/libcore/tests/num/dec2flt/rawfp.rs @@ -76,7 +76,7 @@ fn rounding_overflow() { assert_eq!(rounded.k, adjusted_k + 1); } -#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630 +#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue #42630 #[test] fn prev_float_monotonic() { let mut x = 1.0; @@ -112,7 +112,7 @@ fn next_float_inf() { assert_eq!(next_float(f64::INFINITY), f64::INFINITY); } -#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630 +#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue #42630 #[test] fn next_prev_identity() { for &x in &SOME_FLOATS { @@ -123,7 +123,7 @@ fn next_prev_identity() { } } -#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630 +#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue #42630 #[test] fn next_float_monotonic() { let mut x = 0.49999999999999; diff --git a/src/libcore/tests/num/flt2dec/mod.rs b/src/libcore/tests/num/flt2dec/mod.rs index fed9ce73b2a8e..00f2f45b70d0b 100644 --- a/src/libcore/tests/num/flt2dec/mod.rs +++ b/src/libcore/tests/num/flt2dec/mod.rs @@ -831,7 +831,7 @@ pub fn to_exact_exp_str_test(mut f_: F) 0000000000000000000000000000000000000000000000000000000000000000\ 0000000000000000000000000000000000000000000000000000000000000000e308"); - // okay, this is becoming tough. fortunately for us, this is almost the worst case. + // Ok, this is becoming tough. Fortunately for us, this is almost the worst case.
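Aside: `minf64` here is the smallest positive (subnormal) `f64`, 2^-1074. The same worst-case value is reachable from stable Rust, which may help when eyeballing these expectations (a sketch; formatting goes through the same flt2dec machinery):

```rust
fn main() {
    let minf64 = f64::from_bits(1); // 2^-1074, bit pattern 0x1
    assert_eq!(format!("{:e}", minf64), "5e-324"); // shortest form
    assert_eq!(format!("{:.1e}", minf64), "4.9e-324"); // two significant digits
}
```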
let minf64 = ldexp_f64(1.0, -1074); assert_eq!(to_string(f, minf64, Minus, 1, false), "5e-324"); assert_eq!(to_string(f, minf64, Minus, 2, false), "4.9e-324"); diff --git a/src/libcore/tests/num/flt2dec/strategy/dragon.rs b/src/libcore/tests/num/flt2dec/strategy/dragon.rs index 1803e39b46df3..0cec78d3f9274 100644 --- a/src/libcore/tests/num/flt2dec/strategy/dragon.rs +++ b/src/libcore/tests/num/flt2dec/strategy/dragon.rs @@ -14,7 +14,7 @@ fn test_mul_pow10() { } } -#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630 +#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue #42630 #[test] fn shortest_sanity_test() { f64_shortest_sanity_test(format_shortest); diff --git a/src/libcore/tests/num/flt2dec/strategy/grisu.rs b/src/libcore/tests/num/flt2dec/strategy/grisu.rs index 53e9f12ae0f14..41815be28b716 100644 --- a/src/libcore/tests/num/flt2dec/strategy/grisu.rs +++ b/src/libcore/tests/num/flt2dec/strategy/grisu.rs @@ -27,7 +27,7 @@ fn test_max_pow10_no_more_than() { } -#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue 42630 +#[cfg_attr(all(target_arch = "wasm32", target_os = "emscripten"), ignore)] // issue #42630 #[test] fn shortest_sanity_test() { f64_shortest_sanity_test(format_shortest); diff --git a/src/libcore/tests/pattern.rs b/src/libcore/tests/pattern.rs index b78ed0210770f..c4ec2b0e5e8a2 100644 --- a/src/libcore/tests/pattern.rs +++ b/src/libcore/tests/pattern.rs @@ -1,7 +1,6 @@ use std::str::pattern::*; -// This macro makes it easier to write -// tests that do a series of iterations +// This macro makes it easier to write tests that do a series of iterations. macro_rules! search_asserts { ($haystack:expr, $needle:expr, $testname:expr, [$($func:ident),*], $result:expr) => { let mut searcher = $needle.into_searcher($haystack); @@ -10,11 +9,10 @@ macro_rules! search_asserts { } } -/// Combined enum for the results of next() and next_match()/next_reject() +/// Combined enum for the results of `next()` and `next_match()`/`next_reject()`. #[derive(Debug, PartialEq, Eq)] enum Step { - // variant names purposely chosen to - // be the same length for easy alignment + // Variant names purposely chosen to be the same length for easy alignment. Matches(usize, usize), Rejects(usize, usize), InRange(usize, usize), @@ -44,11 +42,11 @@ impl From> for Step { // ignore-tidy-linelength -// FIXME(Manishearth) these tests focus on single-character searching (CharSearcher) -// and on next()/next_match(), not next_reject(). This is because -// the memchr changes make next_match() for single chars complex, but next_reject() -// continues to use next() under the hood. We should add more test cases for all -// of these, as well as tests for StrSearcher and higher level tests for str::find() (etc) +// FIXME(Manishearth): these tests focus on single-character searching (`CharSearcher`), +// and on `next()`/`next_match()`, not `next_reject()`. This is because +// the `memchr` changes make `next_match()` for single chars complex, but `next_reject()` +// continues to use `next()` under the hood. We should add more test cases for all +// of these, as well as tests for `StrSearcher` and higher level tests for `str::find()`, etc. #[test] fn test_simple_iteration() { @@ -106,21 +104,21 @@ fn test_simple_search() { ); } -// Á, 각, ก, 😀 all end in 0x81 -// 🁀, ᘀ do not end in 0x81 but contain the byte -// ꁁ has 0x81 as its second and third bytes. +// `Á`, `각`, `ก`, `😀` all end in `0x81`.
+// `🁀`, `ᘀ` do not end in `0x81`, but contain the byte. +// `ꁁ` has `0x81` as its second and third bytes. // -// The memchr-using implementation of next_match -// and next_match_back temporarily violate +// The `memchr`-using implementation of `next_match` +// and `next_match_back` temporarily violate // the property that the search is always on a unicode boundary, -// which is fine as long as this never reaches next() or next_back(). -// So we test if next() is correct after each next_match() as well. +// which is fine as long as this never reaches `next()` or `next_back()`. +// So we test if `next()` is correct after each `next_match()` as well. const STRESS: &str = "Áa🁀bÁꁁfg😁각กᘀ각aÁ각ꁁก😁a"; #[test] fn test_stress_indices() { - // this isn't really a test, more of documentation on the indices of each character in the stresstest string - + // This isn't really a test -- more of documentation on the indices of each character in the + // stress-test string. search_asserts!(STRESS, 'x', "Indices of characters in stress test", [next, next, next, next, next, next, next, next, next, next, next, next, next, next, next, next, next, next, next, next, next], [Rejects(0, 2), // Á @@ -255,8 +253,7 @@ fn test_reverse_search_shared_bytes() { #[test] fn double_ended_regression_test() { - // https://github.com/rust-lang/rust/issues/47175 - // Ensures that double ended searching comes to a convergence + // Issue #47175: ensure that double-ended searching comes to a convergence. search_asserts!("abcdeabcdeabcde", 'a', "alternating double ended search", [next_match, next_match_back, next_match, next_match_back], [InRange(0, 1), InRange(10, 11), InRange(5, 6), Done] diff --git a/src/libcore/tests/slice.rs b/src/libcore/tests/slice.rs index 04d646ea01d03..110a914ac20b6 100644 --- a/src/libcore/tests/slice.rs +++ b/src/libcore/tests/slice.rs @@ -810,7 +810,7 @@ mod slice_index { // optional: // // one or more similar inputs for which data[input] succeeds, - // and the corresponding output as an array. This helps validate + // and the corresponding output as an array. This helps validate // "critical points" where an input range straddles the boundary // between valid and invalid. // (such as the input `len..len`, which is just barely valid) @@ -938,7 +938,7 @@ mod slice_index { data: [0, 1]; // note: using 0 specifically ensures that the result of overflowing is 0..0, - // so that `get` doesn't simply return None for the wrong reason. + // so that `get` doesn't simply return `None` for the wrong reason. bad: data[0 ..= ::std::usize::MAX]; message: "maximum usize"; } diff --git a/src/libcore/time.rs b/src/libcore/time.rs index ac7e11754aa3a..bced2fb0e01f5 100644 --- a/src/libcore/time.rs +++ b/src/libcore/time.rs @@ -221,7 +221,7 @@ impl Duration { /// Returns the fractional part of this `Duration`, in whole milliseconds. /// - /// This method does **not** return the length of the duration when + /// This method does *not* return the length of the duration when /// represented by milliseconds. The returned number always represents a /// fractional portion of a second (i.e., it is less than one thousand). /// @@ -240,7 +240,7 @@ impl Duration { /// Returns the fractional part of this `Duration`, in whole microseconds. /// - /// This method does **not** return the length of the duration when + /// This method does *not* return the length of the duration when /// represented by microseconds. The returned number always represents a /// fractional portion of a second (i.e., it is less than one million). 
/// @@ -259,7 +259,7 @@ impl Duration { /// Returns the fractional part of this `Duration`, in nanoseconds. /// - /// This method does **not** return the length of the duration when + /// This method does *not* return the length of the duration when /// represented by nanoseconds. The returned number always represents a /// fractional portion of a second (i.e., it is less than one billion). /// diff --git a/src/libcore/unicode/printable.rs b/src/libcore/unicode/printable.rs index a950e82cba241..a5a655eb662f1 100644 --- a/src/libcore/unicode/printable.rs +++ b/src/libcore/unicode/printable.rs @@ -1,5 +1,5 @@ -// NOTE: The following code was generated by "src/libcore/unicode/printable.py", -// do not edit directly! +// NOTE: the following code was generated by `src/libcore/unicode/printable.py`; +// do not edit directly! fn check(x: u16, singletonuppers: &[(u8, u8)], singletonlowers: &[u8], normal: &[u8]) -> bool { diff --git a/src/libcore/unicode/tables.rs b/src/libcore/unicode/tables.rs index edef4ca361e4f..0e1805dc18657 100644 --- a/src/libcore/unicode/tables.rs +++ b/src/libcore/unicode/tables.rs @@ -1,4 +1,4 @@ -// NOTE: The following code was generated by "./unicode.py", do not edit directly +// NOTE: the following code was generated by `./unicode.py`; do not edit directly! #![allow(missing_docs, non_upper_case_globals, non_snake_case)] diff --git a/src/libfmt_macros/lib.rs b/src/libfmt_macros/lib.rs index aacd6cec565a5..86ed4ad15fbc9 100644 --- a/src/libfmt_macros/lib.rs +++ b/src/libfmt_macros/lib.rs @@ -413,7 +413,7 @@ impl<'a> Parser<'a> { let pos = self.position(); let format = self.format(); - // Resolve position after parsing format spec. + // Resolves position after parsing format spec. let pos = match pos { Some(position) => position, None => { diff --git a/src/libpanic_unwind/dwarf/eh.rs b/src/libpanic_unwind/dwarf/eh.rs index 07fa2971847f6..4cee394a7a55e 100644 --- a/src/libpanic_unwind/dwarf/eh.rs +++ b/src/libpanic_unwind/dwarf/eh.rs @@ -100,7 +100,7 @@ pub unsafe fn find_eh_action(lsda: *const u8, context: &EHContext<'_>) } } } - // Ip is not present in the table. This should not happen... but it does: issue #35011. + // Ip is not present in the table. This should not happen... but it does: issue #35011. // So rather than returning EHAction::Terminate, we do this. Ok(EHAction::None) } else { diff --git a/src/libpanic_unwind/gcc.rs b/src/libpanic_unwind/gcc.rs index e2b743b379704..faf7f8ebf3b47 100644 --- a/src/libpanic_unwind/gcc.rs +++ b/src/libpanic_unwind/gcc.rs @@ -91,7 +91,7 @@ pub unsafe fn cleanup(ptr: *mut u8) -> Box { cause.unwrap() } -// Rust's exception class identifier. This is used by personality routines to +// Rust's exception class identifier. This is used by personality routines to // determine whether the exception was thrown by their own runtime. fn rust_exception_class() -> uw::_Unwind_Exception_Class { // M O Z \0 R U S T -- vendor, language @@ -99,11 +99,11 @@ fn rust_exception_class() -> uw::_Unwind_Exception_Class { } -// Register ids were lifted from LLVM's TargetLowering::getExceptionPointerRegister() -// and TargetLowering::getExceptionSelectorRegister() for each architecture, +// Register IDs were lifted from LLVM's `TargetLowering::getExceptionPointerRegister()` +// and `TargetLowering::getExceptionSelectorRegister()` for each architecture, // then mapped to DWARF register numbers via register definition tables -// (typically RegisterInfo.td, search for "DwarfRegNum"). 
-// See also http://llvm.org/docs/WritingAnLLVMBackend.html#defining-a-register. +// (typically `RegisterInfo.td`; search for "DwarfRegNum"). +// See also <http://llvm.org/docs/WritingAnLLVMBackend.html#defining-a-register>. #[cfg(target_arch = "x86")] const UNWIND_DATA_REG: (i32, i32) = (0, 2); // EAX, EDX @@ -126,12 +126,12 @@ const UNWIND_DATA_REG: (i32, i32) = (6, 7); // R6, R7 #[cfg(target_arch = "sparc64")] const UNWIND_DATA_REG: (i32, i32) = (24, 25); // I0, I1 -// The following code is based on GCC's C and C++ personality routines. For reference, see: +// The following code is based on GCC's C and C++ personality routines. For reference, see: // https://github.com/gcc-mirror/gcc/blob/master/libstdc++-v3/libsupc++/eh_personality.cc // https://github.com/gcc-mirror/gcc/blob/trunk/libgcc/unwind-c.c // The personality routine for most of our targets, except ARM, which has a slightly different ABI -// (however, iOS goes here as it uses SjLj unwinding). Also, the 64-bit Windows implementation +// (however, iOS goes here as it uses SjLj unwinding). Also, the 64-bit Windows implementation // lives in seh64_gnu.rs #[cfg(all(any(target_os = "ios", target_os = "netbsd", not(target_arch = "arm"))))] #[lang = "eh_personality"] @@ -281,12 +281,12 @@ unsafe extern "C" fn rust_eh_unwind_resume(panic_ctx: *mut u8) -> ! { uw::_Unwind_Resume(panic_ctx as *mut uw::_Unwind_Exception); } -// Frame unwind info registration +// Frame unwind info registration. // // Each module's image contains a frame unwind info section (usually -// ".eh_frame"). When a module is loaded/unloaded into the process, the +// ".eh_frame"). When a module is loaded/unloaded into the process, the // unwinder must be informed about the location of this section in memory. The -// methods of achieving that vary by the platform. On some (e.g., Linux), the +// methods of achieving that vary by the platform. On some (e.g., Linux), the // unwinder can discover unwind info sections on its own (by dynamically // enumerating currently loaded modules via the dl_iterate_phdr() API and // finding their ".eh_frame" sections); Others, like Windows, require modules diff --git a/src/libproc_macro/bridge/client.rs b/src/libproc_macro/bridge/client.rs index b198bdb144699..45d282f31dfab 100644 --- a/src/libproc_macro/bridge/client.rs +++ b/src/libproc_macro/bridge/client.rs @@ -15,7 +15,7 @@ macro_rules! define_handles { } impl HandleCounters { - // FIXME(#53451) public to work around `Cannot create local mono-item` ICE. + // FIXME(#53451): public to work around `cannot create local mono-item` ICE. pub extern "C" fn get() -> &'static Self { static COUNTERS: HandleCounters = HandleCounters { $($oty: AtomicUsize::new(1),)* @@ -25,7 +25,7 @@ macro_rules! define_handles { } } - // FIXME(eddyb) generate the definition of `HandleStore` in `server.rs`. + // FIXME(eddyb): generate the definition of `HandleStore` in `server.rs`. #[repr(C)] #[allow(non_snake_case)] pub(super) struct HandleStore { @@ -171,7 +171,7 @@ define_handles! { Span, } -// FIXME(eddyb) generate these impls by pattern-matching on the +// FIXME(eddyb): generate these impls by pattern-matching on the // names of methods - also could use the presence of `fn drop` // to distinguish between 'owned and 'interned, above. // Alternatively, special 'modes" could be listed of types in with_api @@ -201,7 +201,7 @@ impl Clone for Literal { } } -// FIXME(eddyb) `Literal` should not expose internal `Debug` impls. +// FIXME(eddyb): `Literal` should not expose internal `Debug` impls.
impl fmt::Debug for Literal { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(&self.debug()) @@ -281,7 +281,7 @@ impl BridgeState<'_> { fn with<R>(f: impl FnOnce(&mut BridgeState<'_>) -> R) -> R { BRIDGE_STATE.with(|state| { state.replace(BridgeState::InUse, |mut state| { - // FIXME(#52812) pass `f` directly to `replace` when `RefMutL` is gone + // FIXME(#52812): pass `f` directly to `replace` when `RefMutL` is gone f(&mut *state) }) }) @@ -291,7 +291,7 @@ impl BridgeState<'_> { impl Bridge<'_> { fn enter<R>(self, f: impl FnOnce() -> R) -> R { // Hide the default panic output within `proc_macro` expansions. - // NB. the server can't do this because it may use a different libstd. + // N.B., the server can't do this because it may use a different libstd. static HIDE_PANICS_DURING_EXPANSION: Once = Once::new(); HIDE_PANICS_DURING_EXPANSION.call_once(|| { let prev = panic::take_hook(); @@ -338,7 +338,7 @@ pub struct Client<F> { pub(super) f: F, } -// FIXME(#53451) public to work around `Cannot create local mono-item` ICE, +// FIXME(#53451): public to work around `Cannot create local mono-item` ICE, // affecting not only the function itself, but also the `BridgeState` `thread_local!`. pub extern "C" fn __run_expand1( mut bridge: Bridge<'_>, @@ -360,7 +360,7 @@ pub extern "C" fn __run_expand1( // Take the `cached_buffer` back out, for the output value. b = Bridge::with(|bridge| bridge.cached_buffer.take()); - // HACK(eddyb) Separate encoding a success value (`Ok(output)`) + // HACK(eddyb): separate encoding a success value (`Ok(output)`) // from encoding a panic (`Err(e: PanicMessage)`) to avoid // having handles outside the `bridge.enter(|| ...)` scope, and // to catch panics that could happen while encoding the success. @@ -391,7 +391,7 @@ impl Client<fn(crate::TokenStream) -> crate::TokenStream> { } } -// FIXME(#53451) public to work around `Cannot create local mono-item` ICE, +// FIXME(#53451): public to work around `Cannot create local mono-item` ICE, // affecting not only the function itself, but also the `BridgeState` `thread_local!`. pub extern "C" fn __run_expand2( mut bridge: Bridge<'_>, @@ -414,7 +414,7 @@ pub extern "C" fn __run_expand2( // Take the `cached_buffer` back out, for the output value. b = Bridge::with(|bridge| bridge.cached_buffer.take()); - // HACK(eddyb) Separate encoding a success value (`Ok(output)`) + // HACK(eddyb): separate encoding a success value (`Ok(output)`) // from encoding a panic (`Err(e: PanicMessage)`) to avoid // having handles outside the `bridge.enter(|| ...)` scope, and // to catch panics that could happen while encoding the success. diff --git a/src/libproc_macro/bridge/mod.rs b/src/libproc_macro/bridge/mod.rs index 3173651b03951..68a2511a6cf10 100644 --- a/src/libproc_macro/bridge/mod.rs +++ b/src/libproc_macro/bridge/mod.rs @@ -103,7 +103,7 @@ macro_rules! with_api { Literal { fn drop($self: $S::Literal); fn clone($self: &$S::Literal) -> $S::Literal; - // FIXME(eddyb) `Literal` should not expose internal `Debug` impls. + // FIXME(eddyb): `Literal` should not expose internal `Debug` impls. fn debug($self: &$S::Literal) -> String; fn integer(n: &str) -> $S::Literal; fn typed_integer(n: &str, kind: &str) -> $S::Literal; @@ -160,7 +160,7 @@ macro_rules! with_api { }; } -// FIXME(eddyb) this calls `encode` for each argument, but in reverse, +// FIXME(eddyb): this calls `encode` for each argument, but in reverse, // to avoid borrow conflicts from borrows started by `&mut` arguments. macro_rules! 
reverse_encode { ($writer:ident;) => {}; @@ -170,7 +170,7 @@ macro_rules! reverse_encode { } } -// FIXME(eddyb) this calls `decode` for each argument, but in reverse, +// FIXME(eddyb): this calls `decode` for each argument, but in reverse, // to avoid borrow conflicts from borrows started by `&mut` arguments. macro_rules! reverse_decode { ($reader:ident, $s:ident;) => {}; diff --git a/src/libproc_macro/bridge/scoped_cell.rs b/src/libproc_macro/bridge/scoped_cell.rs index 6f7965095b638..f6a010f87658f 100644 --- a/src/libproc_macro/bridge/scoped_cell.rs +++ b/src/libproc_macro/bridge/scoped_cell.rs @@ -14,8 +14,8 @@ pub trait LambdaL: for<'a> ApplyL<'a> {} impl<T: for<'a> ApplyL<'a>> LambdaL for T {} -// HACK(eddyb) work around projection limitations with a newtype -// FIXME(#52812) replace with `&'a mut <T as ApplyL<'b>>::Out` +// HACK(eddyb): work around projection limitations with a newtype. +// FIXME(#52812): replace with `&'a mut <T as ApplyL<'b>>::Out`. pub struct RefMutL<'a, 'b, T: LambdaL>(&'a mut <T as ApplyL<'b>>::Out); impl<'a, 'b, T: LambdaL> Deref for RefMutL<'a, 'b, T> { diff --git a/src/libproc_macro/bridge/server.rs b/src/libproc_macro/bridge/server.rs index 75806eb9d1760..bd62da169f403 100644 --- a/src/libproc_macro/bridge/server.rs +++ b/src/libproc_macro/bridge/server.rs @@ -2,7 +2,7 @@ use super::*; -// FIXME(eddyb) generate the definition of `HandleStore` in `server.rs`. +// FIXME(eddyb): generate the definition of `HandleStore` in `server.rs`. use super::client::HandleStore; /// Declare an associated item of one of the traits below, optionally @@ -83,9 +83,9 @@ macro_rules! define_dispatcher_impl { ($($name:ident { $(fn $method:ident($($arg:ident: $arg_ty:ty),* $(,)*) $(-> $ret_ty:ty)*;)* }),* $(,)*) => { - // FIXME(eddyb) `pub` only for `ExecutionStrategy` below. + // FIXME(eddyb): `pub` only for `ExecutionStrategy` below. pub trait DispatcherTrait { - // HACK(eddyb) these are here to allow `Self::$name` to work below. + // HACK(eddyb): these are here to allow `Self::$name` to work below. $(type $name;)* fn dispatch(&mut self, b: Buffer<u8>) -> Buffer<u8>; } @@ -103,7 +103,7 @@ macro_rules! define_dispatcher_impl { reverse_decode!(reader, handle_store; $($arg: $arg_ty),*); $name::$method(server, $($arg),*) }; - // HACK(eddyb) don't use `panic::catch_unwind` in a panic. + // HACK(eddyb): don't use `panic::catch_unwind` in a panic. // If client and server happen to use the same `libstd`, // `catch_unwind` asserts that the panic counter was 0, // even when the closure passed to it didn't panic. diff --git a/src/libproc_macro/lib.rs b/src/libproc_macro/lib.rs index 6c061189d00d7..7417c976da599 100644 --- a/src/libproc_macro/lib.rs +++ b/src/libproc_macro/lib.rs @@ -177,7 +177,7 @@ impl Extend<TokenTree> for TokenStream { #[stable(feature = "token_stream_extend", since = "1.30.0")] impl Extend<TokenStream> for TokenStream { fn extend<I: IntoIterator<Item = TokenStream>>(&mut self, streams: I) { - // FIXME(eddyb) Use an optimized implementation if/when possible. + // FIXME(eddyb): use an optimized implementation if/when possible. *self = iter::once(mem::replace(self, Self::new())).chain(streams).collect(); } } @@ -646,7 +646,7 @@ impl Group { /// Configures the span for this `Group`'s delimiters, but not its internal /// tokens. /// - /// This method will **not** set the span of all the internal tokens spanned + /// This method will *not* set the span of all the internal tokens spanned /// by this group, but rather it will only set the span of the delimiter /// tokens at the level of the `Group`. 
#[stable(feature = "proc_macro_lib2", since = "1.29.0")] @@ -1065,7 +1065,7 @@ impl Literal { // was 'c' or whether it was '\u{63}'. #[unstable(feature = "proc_macro_span", issue = "54725")] pub fn subspan<R: RangeBounds<usize>>(&self, range: R) -> Option<Span> { - // HACK(eddyb) something akin to `Option::cloned`, but for `Bound<&T>`. + // HACK(eddyb): something akin to `Option::cloned`, but for `Bound<&T>`. fn cloned_bound<T: Clone>(bound: Bound<&T>) -> Bound<T> { match bound { Bound::Included(x) => Bound::Included(x.clone()), @@ -1102,7 +1102,7 @@ impl fmt::Display for Literal { #[stable(feature = "proc_macro_lib2", since = "1.29.0")] impl fmt::Debug for Literal { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // FIXME(eddyb) `Literal` should not expose internal `Debug` impls. + // FIXME(eddyb): `Literal` should not expose internal `Debug` impls. self.0.fmt(f) } } diff --git a/src/librustc/cfg/construct.rs b/src/librustc/cfg/construct.rs index f7ffbe8c65833..269f3118512ba 100644 --- a/src/librustc/cfg/construct.rs +++ b/src/librustc/cfg/construct.rs @@ -19,15 +19,20 @@ struct CFGBuilder<'a, 'tcx: 'a> { #[derive(Copy, Clone)] struct BlockScope { - block_expr_id: hir::ItemLocalId, // id of breakable block expr node - break_index: CFGIndex, // where to go on `break` + // ID of breakable block expr node. + block_expr_id: hir::ItemLocalId, + // Where to go on `break`. + break_index: CFGIndex, } #[derive(Copy, Clone)] struct LoopScope { - loop_id: hir::ItemLocalId, // id of loop/while node - continue_index: CFGIndex, // where to go on a `loop` - break_index: CFGIndex, // where to go on a `break` + // ID of `loop`/`while` node. + loop_id: hir::ItemLocalId, + // Where to go on a `loop`. + continue_index: CFGIndex, + // Where to go on `break`. + break_index: CFGIndex, } pub fn construct<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, @@ -475,7 +480,7 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> { // #47295: We used to have very special case code // here for when a pair of arms are both formed // solely from constants, and if so, not add these - // edges. But this was not actually sound without + // edges. But this was not actually sound without // other constraints that we stopped enforcing at // some point. while let Some(prev) = prev_guards.pop() { diff --git a/src/librustc/dep_graph/dep_node.rs b/src/librustc/dep_graph/dep_node.rs index 796739c872174..c926c42100b83 100644 --- a/src/librustc/dep_graph/dep_node.rs +++ b/src/librustc/dep_graph/dep_node.rs @@ -151,7 +151,7 @@ macro_rules! define_dep_nodes { } } - // FIXME: Make `is_anon`, `is_input`, `is_eval_always` and `has_params` properties + // FIXME: make `is_anon`, `is_input`, `is_eval_always` and `has_params` properties // of queries #[inline(always)] pub fn is_anon(&self) -> bool { @@ -428,23 +428,23 @@ define_dep_nodes!( <'tcx> [] Null, // Represents the `Krate` as a whole (the `hir::Krate` value) (as - // distinct from the krate module). This is basically a hash of + // distinct from the `krate` module). This is basically a hash of // the entire krate, so if you read from `Krate` (e.g., by calling // `tcx.hir().krate()`), we will have to assume that any change // means that you need to be recompiled. This is because the // `Krate` value gives you access to all other items. To avoid // this fate, do not call `tcx.hir().krate()`; instead, prefer - // wrappers like `tcx.visit_all_items_in_krate()`. If there is no + // wrappers like `tcx.visit_all_items_in_krate()`. 
If there is no // suitable wrapper, you can use `tcx.dep_graph.ignore()` to gain - // access to the krate, but you must remember to add suitable + // access to the crate, but you must remember to add suitable // edges yourself for the individual items that you read. [input] Krate, - // Represents the body of a function or method. The def-id is that of the + // Represents the body of a function or method. The `DefId` is that of the // function/method. [input] HirBody(DefId), - // Represents the HIR node with the given node-id + // Represents the HIR node with the given `NodeId`. [input] Hir(DefId), // Represents metadata from an extern crate. @@ -548,7 +548,7 @@ define_dep_nodes!( <'tcx> [] Environment(DefId), [] DescribeDef(DefId), - // FIXME(mw): DefSpans are not really inputs since they are derived from + // FIXME(mw): `DefSpan`s are not really inputs since they are derived from // HIR. But at the moment HIR hashing still contains some hacks that allow // to make type debuginfo to be source location independent. Declaring // DefSpan an input makes sure that changes to these are always detected diff --git a/src/librustc/dep_graph/graph.rs b/src/librustc/dep_graph/graph.rs index 8a2f79e6793c0..69786f63bd785 100644 --- a/src/librustc/dep_graph/graph.rs +++ b/src/librustc/dep_graph/graph.rs @@ -75,7 +75,7 @@ struct DepGraphData { dep_node_debug: Lock<FxHashMap<DepNode, String>>, - // Used for testing, only populated when -Zquery-dep-graph is specified. + // Used for testing, only populated when the `-Zquery-dep-graph` compiler flag is specified. loaded_from_cache: Lock<FxHashMap<DepNodeIndex, bool>>, } @@ -162,7 +162,7 @@ impl DepGraph { } /// Starts a new dep-graph task. Dep-graph tasks are specified - /// using a free function (`task`) and **not** a closure -- this + /// using a free function (`task`) and *not* a closure -- this /// is intentional because we want to exercise tight control over /// what state they have access to. In particular, we want to /// prevent implicit 'leaks' of tracked state into the task (which @@ -256,8 +256,8 @@ impl DepGraph { // In incremental mode, hash the result of the task. We don't // do anything with the hash yet, but we are computing it // anyway so that - // - we make sure that the infrastructure works and - // - we can get an idea of the runtime cost. + // - we make sure that the infrastructure works and + // - we can get an idea of the runtime cost. let mut hcx = cx.get_stable_hashing_context(); if cfg!(debug_assertions) { @@ -294,7 +294,7 @@ impl DepGraph { let print_status = cfg!(debug_assertions) && hcx.sess().opts.debugging_opts.dep_tasks; - // Determine the color of the new DepNode. + // Determine the color of the new `DepNode`. if let Some(prev_index) = data.previous.node_to_index_opt(&key) { let prev_fingerprint = data.previous.fingerprint_by_index(prev_index); @@ -522,7 +522,7 @@ impl DepGraph { for (current_dep_node_index, edges) in current_dep_graph.data.iter_enumerated() .map(|(i, d)| (i, &d.edges)) { let start = edge_list_data.len() as u32; - // This should really just be a memcpy :/ + // FIXME: this should really just be a `memcpy`. edge_list_data.extend(edges.iter().map(|i| SerializedDepNodeIndex::new(i.index()))); let end = edge_list_data.len() as u32; @@ -578,17 +578,17 @@ impl DepGraph { ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> { debug_assert!(!dep_node.kind.is_input()); - // Return None if the dep graph is disabled + // Return `None` if the dep graph is disabled. 
let data = self.data.as_ref()?; - // Return None if the dep node didn't exist in the previous session + // Return `None` if the dep node didn't exist in the previous session. let prev_index = data.previous.node_to_index_opt(dep_node)?; match data.colors.get(prev_index) { Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)), Some(DepNodeColor::Red) => None, None => { - // This DepNode and the corresponding query invocation existed + // This `DepNode` and the corresponding query invocation existed // in the previous compilation session too, so we can try to // mark it as green by recursively marking all of its // dependencies green. @@ -620,7 +620,7 @@ impl DepGraph { debug_assert!(data.colors.get(prev_dep_node_index).is_none()); } - // We never try to mark inputs as green + // We never try to mark inputs as green. debug_assert!(!dep_node.kind.is_input()); debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node); @@ -733,7 +733,7 @@ impl DepGraph { } } } else { - // The DepNode could not be forced. + // The `DepNode` could not be forced. debug!("try_mark_previous_green({:?}) - END - dependency {:?} \ could not be forced", dep_node, dep_dep_node); return None @@ -743,24 +743,24 @@ impl DepGraph { } // If we got here without hitting a `return` that means that all - // dependencies of this DepNode could be marked as green. Therefore we - // can also mark this DepNode as green. + // dependencies of this `DepNode` could be marked as green. Therefore, we + // can also mark this `DepNode` as green. - // There may be multiple threads trying to mark the same dep node green concurrently + // There may be multiple threads trying to mark the same dep node green concurrently. let (dep_node_index, did_allocation) = { let mut current = data.current.borrow_mut(); // Copy the fingerprint from the previous graph, - // so we don't have to recompute it + // so we don't have to recompute it. let fingerprint = data.previous.fingerprint_by_index(prev_dep_node_index); - // We allocating an entry for the node in the current dependency graph and - // adding all the appropriate edges imported from the previous graph + // We allocate an entry for the node in the current dependency graph and + // add all the appropriate edges imported from the previous graph. current.intern_node(*dep_node, current_deps, fingerprint) }; - // ... emitting any stored diagnostic ... + // ... emitting any stored diagnostic, ... let diagnostics = tcx.queries.on_disk_cache .load_diagnostics(tcx, prev_dep_node_index); @@ -775,8 +775,8 @@ impl DepGraph { ); } - // ... and finally storing a "Green" entry in the color map. - // Multiple threads can all write the same color here + // ... and finally storing a "green" entry in the color map. + // Multiple threads can all write the same color here. #[cfg(not(parallel_compiler))] debug_assert!(data.colors.get(prev_dep_node_index).is_none(), "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \ @@ -801,7 +801,7 @@ impl DepGraph { diagnostics: Vec<Diagnostic>, ) { if did_allocation || !cfg!(parallel_compiler) { - // Only the thread which did the allocation emits the error messages + // Only the thread which did the allocation emits the error messages. let handle = tcx.sess.diagnostic(); // Promote the previous diagnostics to the current session. @@ -814,12 +814,12 @@ impl DepGraph { #[cfg(parallel_compiler)] { - // Mark the diagnostics and emitted and wake up waiters + // Mark the diagnostics as emitted and wake up waiters. 
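The two branches of this hunk describe a one-emitter/many-waiters handoff: the thread that did the allocation emits the diagnostics, inserts the node index into a shared set, and notifies a condition variable, while every other thread loops on the condition variable until the index appears. A minimal std-only sketch of that pattern, using stand-in names rather than rustc's actual types:

```rust
use std::collections::HashSet;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    // Shared "emitted" set plus the condition variable used to wake waiters.
    let shared = Arc::new((Mutex::new(HashSet::new()), Condvar::new()));
    let dep_node_index: u32 = 42;

    let waiter = {
        let shared = Arc::clone(&shared);
        thread::spawn(move || {
            let (emitted, cond_var) = &*shared;
            let mut emitted = emitted.lock().unwrap();
            // The `else` branch: wait until the emitting thread inserts our index.
            while !emitted.contains(&dep_node_index) {
                emitted = cond_var.wait(emitted).unwrap();
            }
        })
    };

    // The `if` branch: mark the diagnostics as emitted and wake up waiters.
    let (emitted, cond_var) = &*shared;
    emitted.lock().unwrap().insert(dep_node_index);
    cond_var.notify_all();

    waiter.join().unwrap();
}
```

The loop around `Condvar::wait` matters: a waiter that arrives after the notification has already fired sees the index in the set and never blocks.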
data.emitted_diagnostics.lock().insert(dep_node_index); data.emitted_diagnostics_cond_var.notify_all(); } } else { - // The other threads will wait for the diagnostics to be emitted + // The other threads will wait for the diagnostics to be emitted. let mut emitted_diagnostics = data.emitted_diagnostics.lock(); loop { @@ -831,20 +831,20 @@ impl DepGraph { } } - // Returns true if the given node has been marked as green during the - // current compilation session. Used in various assertions + // Returns `true` if the given node has been marked as green during the + // current compilation session. Used in various assertions. pub fn is_green(&self, dep_node: &DepNode) -> bool { self.node_color(dep_node).map(|c| c.is_green()).unwrap_or(false) } - // This method loads all on-disk cacheable query results into memory, so - // they can be written out to the new cache file again. Most query results - // will already be in memory but in the case where we marked something as - // green but then did not need the value, that value will never have been - // loaded from disk. - // - // This method will only load queries that will end up in the disk cache. - // Other queries will not be executed. + /// Loads all on-disk cacheable query results into memory, so + /// they can be written out to the new cache file again. Most query results + /// will already be in memory but in the case where we marked something as + /// green but then did not need the value, that value will never have been + /// loaded from disk. + /// + /// This method will only load queries that will end up in the disk cache. + /// Other queries will not be executed. pub fn exec_cache_promotions<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) { let green_nodes: Vec<DepNode> = { let data = self.data.as_ref().unwrap(); @@ -1018,15 +1018,15 @@ impl CurrentDepGraph { fingerprint: Fingerprint ) -> DepNodeIndex { // If this is an input node, we expect that it either has no - // dependencies, or that it just depends on DepKind::CrateMetadata - // or DepKind::Krate. This happens for some "thin wrapper queries" + // dependencies, or that it just depends on `DepKind::CrateMetadata` + // or `DepKind::Krate`. This happens for some "thin wrapper queries" // like `crate_disambiguator` which sometimes have zero deps (for - // when called for LOCAL_CRATE) or they depend on a CrateMetadata + // when called for `LOCAL_CRATE`) or they depend on a `CrateMetadata` // node. if cfg!(debug_assertions) { if node.kind.is_input() && task_deps.reads.len() > 0 && - // FIXME(mw): Special case for DefSpan until Spans are handled - // better in general. + // FIXME(mw): special case for `DefSpan` until `Span`s are handled - // better in general. node.kind != DepKind::DefSpan && task_deps.reads.iter().any(|&i| { !(self.data[i].node.kind == DepKind::CrateMetadata || @@ -1053,8 +1053,8 @@ impl CurrentDepGraph { ::std::mem::discriminant(&read_dep_node.kind).hash(&mut hasher); - // Fingerprint::combine() is faster than sending Fingerprint - // through the StableHasher (at least as long as StableHasher + // `Fingerprint::combine()` is faster than sending `Fingerprint` + // through the `StableHasher` (at least as long as `StableHasher` // is so slow). fingerprint = fingerprint.combine(read_dep_node.hash); } @@ -1144,8 +1144,8 @@ pub struct TaskDeps { read_set: FxHashSet<DepNodeIndex>, } -// A data structure that stores Option<DepNodeColor> values as a contiguous -// array, using one u32 per entry. +// A data structure that stores `Option<DepNodeColor>` values as a contiguous +// array, using one `u32` per entry. 
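The comment closing this hunk describes a compact table: each `Option<DepNodeColor>` is packed into one `u32`, with reserved values for `None` and `Red` and the remaining range carrying a green node's index. A self-contained sketch of such an encoding; the constant names and the `u32` payload of `Green` are assumptions for illustration, not taken from this patch:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum DepNodeColor {
    Red,
    Green(u32), // index of the node in the current dep graph
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;

fn encode(color: Option<DepNodeColor>) -> u32 {
    match color {
        None => COMPRESSED_NONE,
        Some(DepNodeColor::Red) => COMPRESSED_RED,
        Some(DepNodeColor::Green(index)) => COMPRESSED_FIRST_GREEN + index,
    }
}

fn decode(value: u32) -> Option<DepNodeColor> {
    match value {
        COMPRESSED_NONE => None,
        COMPRESSED_RED => Some(DepNodeColor::Red),
        green => Some(DepNodeColor::Green(green - COMPRESSED_FIRST_GREEN)),
    }
}

fn main() {
    // One `u32` per entry round-trips all three states.
    for &color in [None, Some(DepNodeColor::Red), Some(DepNodeColor::Green(7))].iter() {
        assert_eq!(decode(encode(color)), color);
    }
}
```

Storing the colors as a flat array of `u32`s keeps the map dense and cheap to index.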
struct DepNodeColorMap { values: IndexVec<SerializedDepNodeIndex, u32>, } diff --git a/src/librustc/diagnostics.rs b/src/librustc/diagnostics.rs index f46ff6f6062c2..8f794e6df71ee 100644 --- a/src/librustc/diagnostics.rs +++ b/src/librustc/diagnostics.rs @@ -1793,7 +1793,7 @@ extern "C" { fn foo(x: S); } impl S { fn foo(self) { /* ... */ } } ``` -the type of `foo` is **not** `fn(S)`, as one might expect. +the type of `foo` is *not* `fn(S)`, as one might expect. Rather, it is a unique, zero-sized marker type written here as `typeof(foo)`. However, `typeof(foo)` can be _coerced_ to a function pointer `fn(S)`, so you rarely notice this: diff --git a/src/librustc/hir/lowering.rs b/src/librustc/hir/lowering.rs index 84487c40f8745..53947aa6e2805 100644 --- a/src/librustc/hir/lowering.rs +++ b/src/librustc/hir/lowering.rs @@ -74,7 +74,7 @@ const HIR_ID_COUNTER_LOCKED: u32 = 0xFFFFFFFF; pub struct LoweringContext<'a> { crate_root: Option<&'static str>, - // Used to assign ids to HIR nodes that do not directly correspond to an AST node. + // Used to assign IDs to HIR nodes that do not directly correspond to an AST node. sess: &'a Session, cstore: &'a dyn CrateStore, @@ -1969,7 +1969,7 @@ impl<'a> LoweringContext<'a> { &mut self, data: &ParenthesizedArgs, ) -> (hir::GenericArgs, bool) { - // Switch to `PassThrough` mode for anonymous lifetimes: this + // Switch to `PassThrough` mode for anonymous lifetimes; this // means that we permit things like `&Ref<T>`, where `Ref` has // a hidden lifetime parameter. This is needed for backwards // compatibility, even in contexts like an impl header where @@ -2331,7 +2331,7 @@ impl<'a> LoweringContext<'a> { } }; - // "<Output = T>" + // `<Output = T>` let LoweredNodeId { node_id, hir_id } = this.next_id(); let future_params = P(hir::GenericArgs { args: hir_vec![], @@ -3198,7 +3198,7 @@ impl<'a> LoweringContext<'a> { // // The first two are produced by recursively invoking // `lower_use_tree` (and indeed there may be things - // like `use foo::{a::{b, c}}` and so forth). They + // like `use foo::{a::{b, c}}` and so forth). They // wind up being directly added to // `self.items`. However, the structure of this // function also requires us to return one item, and @@ -3223,7 +3223,7 @@ impl<'a> LoweringContext<'a> { let mut ident = ident.clone(); let mut prefix = prefix.clone(); - // Give the segments new node-ids since they are being cloned. + // Give the segments new `NodeId`s since they are being cloned. for seg in &mut prefix.segments { seg.id = self.sess.next_node_id(); } @@ -3279,7 +3279,7 @@ impl<'a> LoweringContext<'a> { // because that affects rustdoc and also the lints // about `pub` items. But we can't *always* make it // private -- particularly not for restricted paths -- - // because it contains node-ids that would then be + // because it contains `NodeId`s that would then be // unused, failing the check that HirIds are "densely // assigned". match vis.node { diff --git a/src/librustc/hir/map/definitions.rs b/src/librustc/hir/map/definitions.rs index f454d691d4188..017f9cc86dda7 100644 --- a/src/librustc/hir/map/definitions.rs +++ b/src/librustc/hir/map/definitions.rs @@ -22,17 +22,17 @@ use syntax::symbol::{Symbol, InternedString}; use syntax_pos::{Span, DUMMY_SP}; use crate::util::nodemap::NodeMap; -/// The DefPathTable maps DefIndexes to DefKeys and vice versa. -/// Internally the DefPathTable holds a tree of DefKeys, where each DefKey -/// stores the DefIndex of its parent. -/// There is one DefPathTable for each crate. +/// Maps `DefIndex`es to `DefKey`s and vice versa. 
+/// Internally, holds a tree of `DefKey`s, where each `DefKey` +/// stores the `DefIndex` of its parent. +/// There is one `DefPathTable` for each crate. #[derive(Default)] pub struct DefPathTable { index_to_key: [Vec<DefKey>; 2], def_path_hashes: [Vec<DefPathHash>; 2], } -// Unfortunately we have to provide a manual impl of Clone because of the +// Unfortunately we have to provide a manual impl of `Clone` because of the // fixed-sized array field. impl Clone for DefPathTable { fn clone(&self) -> Self { @@ -107,14 +107,13 @@ impl DefPathTable { } } - impl Encodable for DefPathTable { fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> { - // Index to key + // Index to key. self.index_to_key[DefIndexAddressSpace::Low.index()].encode(s)?; self.index_to_key[DefIndexAddressSpace::High.index()].encode(s)?; - // DefPath hashes + // `DefPath` hashes. self.def_path_hashes[DefIndexAddressSpace::Low.index()].encode(s)?; self.def_path_hashes[DefIndexAddressSpace::High.index()].encode(s)?; @@ -174,7 +173,7 @@ impl DefKey { fn compute_stable_hash(&self, parent_hash: DefPathHash) -> DefPathHash { let mut hasher = StableHasher::new(); - // We hash a 0u8 here to disambiguate between regular DefPath hashes, + // We hash a `0u8` here to disambiguate between regular `DefPath` hashes // and the special "root_parent" below. 0u8.hash(&mut hasher); parent_hash.hash(&mut hasher); @@ -198,8 +197,8 @@ impl DefKey { crate_disambiguator: CrateDisambiguator) -> DefPathHash { let mut hasher = StableHasher::new(); - // Disambiguate this from a regular DefPath hash, - // see compute_stable_hash() above. + // Disambiguate this from a regular `DefPath` hash; + // see `compute_stable_hash()` above. 1u8.hash(&mut hasher); crate_name.hash(&mut hasher); crate_disambiguator.hash(&mut hasher); @@ -330,52 +329,59 @@ impl DefPath { pub enum DefPathData { // Root: these should only be used for the root nodes, because // they are treated specially by the `def_path` function. - /// The crate root (marker) + + /// The crate root (marker). CrateRoot, - // Catch-all for random DefId things like DUMMY_NODE_ID + // Catch-all for random `DefId` things like `DUMMY_NODE_ID`. Misc, - // Different kinds of items and item-like things: - /// An impl + + // Different kinds of items and item-like things. + + /// An impl. Impl, - /// A trait + /// A trait. Trait(InternedString), - /// An associated type **declaration** (i.e., in a trait) + /// An associated type **declaration** (i.e., in a trait). AssocTypeInTrait(InternedString), - /// An associated type **value** (i.e., in an impl) + /// An associated type **value** (i.e., in an impl). AssocTypeInImpl(InternedString), - /// An existential associated type **value** (i.e., in an impl) + /// An existential associated type **value** (i.e., in an impl). AssocExistentialInImpl(InternedString), - /// Something in the type NS + /// Something in the type namespace. TypeNs(InternedString), - /// Something in the value NS + /// Something in the value namespace. ValueNs(InternedString), - /// A module declaration + /// A module declaration. Module(InternedString), - /// A macro rule + /// A macro rule. MacroDef(InternedString), - /// A closure expression + /// A closure expression. ClosureExpr, - // Subportions of items - /// A type (generic) parameter + + // Subportions of items. + + /// A type (generic) parameter. TypeParam(InternedString), - /// A lifetime (generic) parameter + /// A lifetime (generic) parameter. LifetimeParam(InternedString), - /// A const (generic) parameter + /// A const (generic) parameter. 
ConstParam(InternedString), /// A variant of a enum EnumVariant(InternedString), - /// A struct field + /// A struct field. Field(InternedString), - /// Implicit ctor for a tuple-like struct + /// Implicit ctor for a tuple-like struct. StructCtor, - /// A constant expression (see {ast,hir}::AnonConst). + /// A constant expression (see `{ast,hir}::AnonConst`). AnonConst, - /// An `impl Trait` type node + /// An `impl Trait` type node. ImplTrait, - /// GlobalMetaData identifies a piece of crate metadata that is global to - /// a whole crate (as opposed to just one item). GlobalMetaData components + + /// Identifies a piece of crate metadata that is global to + /// a whole crate (as opposed to just one item). These components /// are only supposed to show up right below the crate root. GlobalMetaData(InternedString), + /// A trait alias. TraitAlias(InternedString), } @@ -469,7 +475,7 @@ impl Definitions { } } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. #[inline] pub fn as_local_hir_id(&self, def_id: DefId) -> Option<hir::HirId> { if def_id.krate == LOCAL_CRATE { @@ -533,13 +539,13 @@ impl Definitions { self.def_index_to_node[address_space.index()].push(ast::CRATE_NODE_ID); self.node_to_def_index.insert(ast::CRATE_NODE_ID, root_index); - // Allocate some other DefIndices that always must exist. + // Allocate some other `DefIndex`es that always must exist. GlobalMetaDataKind::allocate_def_indices(self); root_index } - /// Add a definition with a parent definition. + /// Adds a definition with a parent definition. pub fn create_def_with_parent(&mut self, parent: DefIndex, node_id: ast::NodeId, @@ -557,7 +563,7 @@ impl Definitions { data, self.table.def_key(self.node_to_def_index[&node_id])); - // The root node must be created with create_root_def() + // The root node must be created with `create_root_def()`. assert!(data != DefPathData::CrateRoot); // Find the next free disambiguator for this key. @@ -586,9 +592,9 @@ impl Definitions { self.def_index_to_node[address_space.index()].len()); self.def_index_to_node[address_space.index()].push(node_id); - // Some things for which we allocate DefIndices don't correspond to - // anything in the AST, so they don't have a NodeId. For these cases - // we don't need a mapping from NodeId to DefIndex. + // Some things for which we allocate `DefIndex`es don't correspond to + // anything in the AST, so they don't have a `NodeId`. For these cases + // we don't need a mapping from `NodeId` to `DefIndex`. if node_id != ast::DUMMY_NODE_ID { debug!("create_def_with_parent: def_index_to_node[{:?} <-> {:?}", index, node_id); self.node_to_def_index.insert(node_id, index); @@ -598,7 +604,7 @@ impl Definitions { self.expansions_that_defined.insert(index, expansion); } - // The span is added if it isn't dummy + // The span is added if it isn't dummy. if !span.is_dummy() { self.def_index_to_span.insert(index, span); } @@ -678,7 +684,7 @@ impl DefPathData { GlobalMetaData(name) => { return name } - // note that this does not show up in user printouts + // Note that this does not show up in user print-outs. CrateRoot => "{{root}}", Impl => "{{impl}}", Misc => "{{?}}", @@ -701,9 +707,9 @@ macro_rules! 
count { ( $x:tt $($xs:tt)* ) => (1usize + count!($($xs)*)); } -// We define the GlobalMetaDataKind enum with this macro because we want to +// We define the `GlobalMetaDataKind` enum with this macro because we want to // make sure that we exhaustively iterate over all variants when registering -// the corresponding DefIndices in the DefTable. +// the corresponding `DefIndex`es in the `DefTable`. macro_rules! define_global_metadata_kind { (pub enum GlobalMetaDataKind { $($variant:ident),* @@ -729,7 +735,7 @@ macro_rules! define_global_metadata_kind { DUMMY_SP ); - // Make sure calling def_index does not crash. + // Make sure that calling `def_index` does not crash. instance.def_index(&definitions.table); })* } @@ -743,7 +749,7 @@ macro_rules! define_global_metadata_kind { } }; - // These DefKeys are all right after the root, + // These `DefKey`s are all right after the root, // so a linear search is fine. let index = def_path_table.index_to_key[GLOBAL_MD_ADDRESS_SPACE.index()] .iter() diff --git a/src/librustc/hir/map/hir_id_validator.rs b/src/librustc/hir/map/hir_id_validator.rs index 2c3ff4c9b5c05..aaf2ef60aef62 100644 --- a/src/librustc/hir/map/hir_id_validator.rs +++ b/src/librustc/hir/map/hir_id_validator.rs @@ -96,12 +96,12 @@ impl<'a, 'hir: 'a> HirIdValidator<'a, 'hir> { .expect("owning item has no entry"); if max != self.hir_ids_seen.len() - 1 { - // Collect the missing ItemLocalIds + // Collect the missing `ItemLocalId`s. let missing: Vec<_> = (0 ..= max as u32) .filter(|&i| !self.hir_ids_seen.contains_key(&ItemLocalId::from_u32(i))) .collect(); - // Try to map those to something more useful + // Try to map those to something more useful. let mut missing_items = Vec::with_capacity(missing.len()); for local_id in missing { @@ -112,8 +112,7 @@ impl<'a, 'hir: 'a> HirIdValidator<'a, 'hir> { trace!("missing hir id {:#?}", hir_id); - // We are already in ICE mode here, so doing a linear search - // should be fine. + // We are already in ICE mode here, so doing a linear search should be fine. let (node_id, _) = self.hir_map .definitions() .node_to_hir_id diff --git a/src/librustc/hir/map/mod.rs b/src/librustc/hir/map/mod.rs index b6cf4c1b84d0c..b161e1bb2bbd0 100644 --- a/src/librustc/hir/map/mod.rs +++ b/src/librustc/hir/map/mod.rs @@ -208,7 +208,7 @@ impl<'hir> Map<'hir> { } } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. pub fn read_by_hir_id(&self, hir_id: HirId) { let node_id = self.hir_to_node_id(hir_id); self.read(node_id); @@ -230,7 +230,7 @@ impl<'hir> Map<'hir> { }) } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. pub fn def_path_from_hir_id(&self, id: HirId) -> DefPath { self.def_path(self.local_def_id_from_hir_id(id)) } @@ -248,7 +248,7 @@ impl<'hir> Map<'hir> { }) } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. #[inline] pub fn local_def_id_from_hir_id(&self, hir_id: HirId) -> DefId { let node_id = self.hir_to_node_id(hir_id); @@ -258,7 +258,7 @@ impl<'hir> Map<'hir> { }) } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. #[inline] pub fn opt_local_def_id_from_hir_id(&self, hir_id: HirId) -> Option<DefId> { let node_id = self.hir_to_node_id(hir_id); @@ -275,7 +275,7 @@ impl<'hir> Map<'hir> { self.definitions.as_local_node_id(def_id) } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. 
#[inline] pub fn as_local_hir_id(&self, def_id: DefId) -> Option<HirId> { self.definitions.as_local_hir_id(def_id) } @@ -603,7 +603,7 @@ impl<'hir> Map<'hir> { // Read the module so we'll be re-executed if new items // appear immediately under in the module. If some new item appears // in some nested item in the module, we'll be re-executed due to reads - // in the expect_* calls the loops below + // in the `expect_*` calls in the loops below. self.read(node_id); let module = &self.forest.krate.modules[&node_id]; @@ -621,21 +621,22 @@ impl<'hir> Map<'hir> { } } - /// Retrieve the Node corresponding to `id`, panicking if it cannot + /// Retrieves the `Node` corresponding to `id`, panicking if it cannot /// be found. pub fn get(&self, id: NodeId) -> Node<'hir> { // read recorded by `find` self.find(id).unwrap_or_else(|| bug!("couldn't find node id {} in the AST map", id)) } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. pub fn get_by_hir_id(&self, id: HirId) -> Node<'hir> { let node_id = self.hir_to_node_id(id); self.get(node_id) } pub fn get_if_local(&self, id: DefId) -> Option<Node<'hir>> { - self.as_local_node_id(id).map(|id| self.get(id)) // read recorded by `get` + // Read is recorded by `get`. + self.as_local_node_id(id).map(|id| self.get(id)) } pub fn get_generics(&self, id: DefId) -> Option<&'hir Generics> { @@ -680,13 +681,13 @@ impl<'hir> Map<'hir> { result } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. pub fn find_by_hir_id(&self, hir_id: HirId) -> Option<Node<'hir>> { let node_id = self.hir_to_node_id(hir_id); self.find(node_id) } - /// Similar to `get_parent`; returns the parent node-id, or own `id` if there is + /// Similar to `get_parent`; returns the parent `NodeId`, or own `id` if there is /// no parent. Note that the parent may be `CRATE_NODE_ID`, which is not itself /// present in the map -- so passing the return value of get_parent_node to /// get may actually panic. @@ -706,14 +707,14 @@ impl<'hir> Map<'hir> { self.find_entry(id).and_then(|x| x.parent_node()).unwrap_or(id) } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. pub fn get_parent_node_by_hir_id(&self, id: HirId) -> HirId { let node_id = self.hir_to_node_id(id); let parent_node_id = self.get_parent_node(node_id); self.node_to_hir_id(parent_node_id) } - /// Check if the node is an argument. An argument is a local variable whose + /// Checks if the node is an argument. An argument is a local variable whose /// immediate parent is an item or a closure. pub fn is_argument(&self, id: NodeId) -> bool { match self.find(id) { @@ -837,7 +838,7 @@ impl<'hir> Map<'hir> { } } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. pub fn get_parent_item(&self, id: HirId) -> HirId { let node_id = self.hir_to_node_id(id); let parent_node_id = self.get_parent(node_id); @@ -869,9 +870,10 @@ impl<'hir> Map<'hir> { } /// Returns the nearest enclosing scope. A scope is an item or block. - /// FIXME: it is not clear to me that all items qualify as scopes -- statics - /// and associated types probably shouldn't, for example. Behavior in this - /// regard should be expected to be highly unstable. + // + // FIXME: it is not clear to me that all items qualify as scopes -- statics + // and associated types probably shouldn't, for example. Behavior in this + // regard should be expected to be highly unstable. 
pub fn get_enclosing_scope(&self, id: NodeId) -> Option<NodeId> { self.walk_parent_nodes(id, |node| match *node { Node::Item(_) | @@ -899,7 +901,8 @@ impl<'hir> Map<'hir> { if let Entry { node: Node::Item(Item { node: ItemKind::ForeignMod(ref nm), .. }), .. } = entry { - self.read(id); // reveals some of the content of a node + // Reveals some of the content of a node. + self.read(id); return nm.abi; } } @@ -913,13 +916,14 @@ impl<'hir> Map<'hir> { } pub fn expect_item(&self, id: NodeId) -> &'hir Item { - match self.find(id) { // read recorded by `find` + // Read recorded by `find` method. + match self.find(id) { Some(Node::Item(item)) => item, _ => bug!("expected item, found {}", self.node_to_string(id)) } } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. pub fn expect_item_by_hir_id(&self, id: HirId) -> &'hir Item { let node_id = self.hir_to_node_id(id); self.expect_item(node_id) @@ -985,19 +989,20 @@ impl<'hir> Map<'hir> { } pub fn expect_expr(&self, id: NodeId) -> &'hir Expr { - match self.find(id) { // read recorded by find + // Read recorded by `find` method. + match self.find(id) { Some(Node::Expr(expr)) => expr, _ => bug!("expected expr, found {}", self.node_to_string(id)) } } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. pub fn expect_expr_by_hir_id(&self, id: HirId) -> &'hir Expr { let node_id = self.hir_to_node_id(id); self.expect_expr(node_id) } - /// Returns the name associated with the given NodeId's AST. + /// Returns the name associated with the given `NodeId`'s AST. pub fn name(&self, id: NodeId) -> Name { match self.get(id) { Node::Item(i) => i.ident.name, @@ -1014,14 +1019,14 @@ impl<'hir> Map<'hir> { } } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. pub fn name_by_hir_id(&self, id: HirId) -> Name { let node_id = self.hir_to_node_id(id); self.name(node_id) } - /// Given a node ID, get a list of attributes associated with the AST - /// corresponding to the Node ID + /// Given a `NodeId`, get a list of attributes associated with the AST + /// corresponding to the `NodeId`. pub fn attrs(&self, id: NodeId) -> &'hir [ast::Attribute] { self.read(id); // reveals attributes on the node let attrs = match self.find(id) { @@ -1034,25 +1039,24 @@ impl<'hir> Map<'hir> { Some(Node::Expr(ref e)) => Some(&*e.attrs), Some(Node::Stmt(ref s)) => Some(s.node.attrs()), Some(Node::GenericParam(param)) => Some(&param.attrs[..]), - // unit/tuple structs take the attributes straight from - // the struct definition. + // Unit/tuple structs take the attributes straight from the struct definition. Some(Node::StructCtor(_)) => return self.attrs(self.get_parent(id)), _ => None }; attrs.unwrap_or(&[]) } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. pub fn attrs_by_hir_id(&self, id: HirId) -> &'hir [ast::Attribute] { let node_id = self.hir_to_node_id(id); self.attrs(node_id) } - /// Returns an iterator that yields the node id's with paths that - /// match `parts`. (Requires `parts` is non-empty.) + /// Returns an iterator that yields the `NodeId`s with paths that + /// match `parts`. (Requires that `parts` is non-empty.) 
/// /// For example, if given `parts` equal to `["bar", "quux"]`, then - /// the iterator will produce node id's for items with paths + /// the iterator will produce `NodeId`s for items with paths /// such as `foo::bar::quux`, `bar::quux`, `other::bar::quux`, and /// any other such items it can find in the map. pub fn nodes_matching_suffix<'a>(&'a self, parts: &'a [String]) @@ -1066,7 +1070,8 @@ impl<'hir> Map<'hir> { } pub fn span(&self, id: NodeId) -> Span { - self.read(id); // reveals span from node + // Reveals span from node. + self.read(id); match self.find_entry(id).map(|entry| entry.node) { Some(Node::Item(item)) => item.span, Some(Node::ForeignItem(foreign_item)) => foreign_item.span, @@ -1111,7 +1116,7 @@ impl<'hir> Map<'hir> { node_id_to_string(self, id, true) } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. pub fn hir_to_string(&self, id: HirId) -> String { hir_id_to_string(self, id, true) } @@ -1120,7 +1125,7 @@ impl<'hir> Map<'hir> { node_id_to_string(self, id, false) } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. pub fn hir_to_user_string(&self, id: HirId) -> String { hir_id_to_string(self, id, false) } @@ -1129,7 +1134,7 @@ impl<'hir> Map<'hir> { print::to_string(self, |s| s.print_node(self.get(id))) } - // FIXME(@ljedrz): replace the NodeId variant + // FIXME(ljedrz): replace the `NodeId` variant. pub fn hir_to_pretty_string(&self, id: HirId) -> String { print::to_string(self, |s| s.print_node(self.get_by_hir_id(id))) } @@ -1167,7 +1172,7 @@ impl<'a, 'hir> NodesMatchingSuffix<'a, 'hir> { // that mod's name. // // If `id` itself is a mod named `m` with parent `p`, then - // returns `Some(id, m, p)`. If `id` has no mod in its parent + // returns `Some(id, m, p)`. If `id` has no mod in its parent // chain, then returns `None`. fn find_first_mod_parent<'a>(map: &'a Map<'_>, mut id: NodeId) -> Option<(NodeId, Name)> { loop { @@ -1325,9 +1330,9 @@ impl<'a> print::State<'a> { Node::Block(a) => { use syntax::print::pprust::PrintState; - // containing cbox, will be closed by print-block at } + // Containing cbox -- will be closed by print-block at `}`. self.cbox(print::indent_unit)?; - // head-ibox, will be closed by print-block after { + // Head-ibox -- will be closed by print-block after `{`. self.ibox(0)?; self.print_block(&a) } @@ -1335,8 +1340,8 @@ impl<'a> print::State<'a> { Node::Visibility(a) => self.print_visibility(&a), Node::GenericParam(_) => bug!("cannot print Node::GenericParam"), Node::Field(_) => bug!("cannot print StructField"), - // these cases do not carry enough information in the - // hir_map to reconstruct their full structure for pretty + // These cases do not carry enough information in the + // `hir_map` to reconstruct their full structure for pretty // printing. Node::StructCtor(_) => bug!("cannot print isolated StructCtor"), Node::Local(a) => self.print_local_decl(&a), @@ -1476,7 +1481,7 @@ fn node_id_to_string(map: &Map<'_>, id: NodeId, include_id: bool) -> String { } } -// FIXME(@ljedrz): replace the NodeId variant +// FIXME(ljedrz): replace the `NodeId` variant. fn hir_id_to_string(map: &Map<'_>, id: HirId, include_id: bool) -> String { let node_id = map.hir_to_node_id(id); node_id_to_string(map, node_id, include_id) diff --git a/src/librustc/hir/mod.rs b/src/librustc/hir/mod.rs index d774359fa79ec..29d4f75dfd08a 100644 --- a/src/librustc/hir/mod.rs +++ b/src/librustc/hir/mod.rs @@ -1,4 +1,4 @@ -//! HIR datatypes. See the [rustc guide] for more info. 
+//! HIR data-types. See the [rustc guide] for more info. //! //! [rustc guide]: https://rust-lang.github.io/rustc-guide/hir.html @@ -112,12 +112,12 @@ impl serialize::UseSpecializedDecodable for HirId { } } -// hack to ensure that we don't try to access the private parts of `ItemLocalId` in this module +// HACK: ensure that we don't try to access the private parts of `ItemLocalId` in this module. mod item_local_id_inner { use rustc_data_structures::indexed_vec::Idx; /// An `ItemLocalId` uniquely identifies something within a given "item-like", - /// that is within a hir::Item, hir::TraitItem, or hir::ImplItem. There is no - /// guarantee that the numerical value of a given `ItemLocalId` corresponds to + /// that is within a `hir::Item`, `hir::TraitItem`, or `hir::ImplItem`. There is + /// no guarantee that the numerical value of a given `ItemLocalId` corresponds to /// the node's position within the owning item in any way, but there is a /// guarantee that the `LocalItemId`s within an owner occupy a dense range of /// integers starting at zero, so a mapping that maps all or most nodes within @@ -236,8 +236,8 @@ impl LifetimeName { LifetimeName::Implicit | LifetimeName::Underscore => true, // It might seem surprising that `Fresh(_)` counts as - // *not* elided -- but this is because, as far as the code - // in the compiler is concerned -- `Fresh(_)` variants act + // *not* elided, but this is because, as far as the code + // in the compiler is concerned, `Fresh(_)` variants act // equivalently to "some fresh name". They correspond to // early-bound regions on an impl, in other words. LifetimeName::Error | LifetimeName::Param(_) | LifetimeName::Static => false, @@ -375,8 +375,8 @@ impl PathSegment { } } - // FIXME: hack required because you can't create a static - // `GenericArgs`, so you can't just return a `&GenericArgs`. + // HACK: required because you can't create a static `GenericArgs`, + // so you can't just return a `&GenericArgs`. pub fn with_generic_args<F, R>(&self, f: F) -> R where F: FnOnce(&GenericArgs) -> R { @@ -698,8 +698,8 @@ pub struct Crate { pub span: Span, pub exported_macros: HirVec<MacroDef>, - // N.B., we use a BTreeMap here so that `visit_all_items` iterates - // over the ids in increasing order. In principle it should not + // N.B., we use a `BTreeMap` here so that `visit_all_items` iterates + // over the IDs in increasing order. In principle it should not // matter what order we visit things in, but in *practice* it // does, because it can affect the order in which errors are // detected, which in turn can make compile-fail tests yield @@ -833,7 +833,7 @@ impl fmt::Debug for Pat { } impl Pat { - // FIXME(#19596) this is a workaround, but there should be a better way + // FIXME(#19596): this is a workaround, but there should be a better way. fn walk_<G>(&self, it: &mut G) -> bool where G: FnMut(&Pat) -> bool { @@ -1603,11 +1603,11 @@ impl fmt::Display for LoopIdError { #[derive(Clone, RustcEncodable, RustcDecodable, Debug, Copy)] pub struct Destination { - // This is `Some(_)` iff there is an explicit user-specified `label + // This is `Some(_)` iff there is an explicit user-specified label. pub label: Option<Label>,