diff --git a/Cargo.toml b/Cargo.toml index c53741db3..165851b4e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -635,7 +635,6 @@ multiple_crate_versions = "allow" # 2314 missing_errors_doc = "allow" # 1504 missing_panics_doc = "allow" # 946 must_use_candidate = "allow" # 322 -doc_markdown = "allow" # 267 match_same_arms = "allow" # 212 unnecessary_semicolon = "allow" # 156 redundant_closure_for_method_calls = "allow" # 133 diff --git a/src/uu/cat/src/cat.rs b/src/uu/cat/src/cat.rs index 46c0d7195..ad8fcf449 100644 --- a/src/uu/cat/src/cat.rs +++ b/src/uu/cat/src/cat.rs @@ -188,7 +188,7 @@ struct InputHandle { /// Concrete enum of recognized file types. /// /// *Note*: `cat`-ing a directory should result in an -/// CatError::IsDirectory +/// [`CatError::IsDirectory`] enum InputType { Directory, File, diff --git a/src/uu/cksum/src/cksum.rs b/src/uu/cksum/src/cksum.rs index 4389aad71..ad2b55801 100644 --- a/src/uu/cksum/src/cksum.rs +++ b/src/uu/cksum/src/cksum.rs @@ -52,7 +52,7 @@ struct Options { /// # Arguments /// /// * `options` - CLI options for the assigning checksum algorithm -/// * `files` - A iterator of OsStr which is a bunch of files that are using for calculating checksum +/// * `files` - An iterator of [`OsStr`] items naming the files for which to calculate the checksum #[allow(clippy::cognitive_complexity)] fn cksum<'a, I>(mut options: Options, files: I) -> UResult<()> where diff --git a/src/uu/cp/src/copydir.rs b/src/uu/cp/src/copydir.rs index 251c8c5af..d64505b18 100644 --- a/src/uu/cp/src/copydir.rs +++ b/src/uu/cp/src/copydir.rs @@ -543,7 +543,7 @@ pub fn path_has_prefix(p1: &Path, p2: &Path) -> io::Result<bool> { /// copied from the provided file. Otherwise, the new directory will have the default /// attributes for the current user. /// - This method excludes certain permissions if ownership or special mode bits could -/// potentially change. (See `test_dir_perm_race_with_preserve_mode_and_ownership``) +/// potentially change. (See `test_dir_perm_race_with_preserve_mode_and_ownership`) /// - The `recursive` flag determines whether parent directories should be created /// if they do not already exist. // we need to allow unused_variable since `options` might be unused in non unix systems diff --git a/src/uu/cp/src/cp.rs b/src/uu/cp/src/cp.rs index e3f03a40d..aa513be0a 100644 --- a/src/uu/cp/src/cp.rs +++ b/src/uu/cp/src/cp.rs @@ -48,11 +48,11 @@ mod platform; #[derive(Debug, Error)] pub enum CpError { - /// Simple io::Error wrapper + /// Simple [`io::Error`] wrapper #[error("{0}")] IoErr(#[from] io::Error), - /// Wrapper for io::Error with path context + /// Wrapper for [`io::Error`] with path context #[error("{1}: {0}")] IoErrContext(io::Error, String), @@ -65,11 +65,11 @@ pub enum CpError { #[error("{}", get_message("cp-error-not-all-files-copied"))] NotAllFilesCopied, - /// Simple walkdir::Error wrapper + /// Simple [`walkdir::Error`] wrapper #[error("{0}")] WalkDirErr(#[from] walkdir::Error), - /// Simple std::path::StripPrefixError wrapper + /// Simple [`StripPrefixError`] wrapper #[error(transparent)] StripPrefixError(#[from] StripPrefixError), @@ -84,9 +84,9 @@ pub enum CpError { #[error("{0}")] InvalidArgument(String), - /// All standard options are included as an an implementation + /// All standard options are included as an implementation /// path, but those that are not implemented yet should return - /// a NotImplemented error. + /// a `NotImplemented` error. 
#[error("{}", get_message_with_args("cp-error-option-not-implemented", HashMap::from([("option".to_string(), 0.to_string())])))] NotImplemented(String), @@ -931,8 +931,8 @@ impl Attributes { } } - /// Set the field to Preserve::NO { explicit: true } if the corresponding field - /// in other is set to Preserve::Yes { .. }. + /// Set the field to `Preserve::No { explicit: true }` if the corresponding field + /// in other is set to `Preserve::Yes { .. }`. pub fn diff(self, other: &Self) -> Self { fn update_preserve_field(current: Preserve, other: Preserve) -> Preserve { if matches!(other, Preserve::Yes { .. }) { @@ -1254,7 +1254,7 @@ impl Options { } impl TargetType { - /// Return TargetType required for `target`. + /// Return [`TargetType`] required for `target`. /// /// Treat target as a dir if we have multiple sources or the target /// exists and already is a directory @@ -1875,7 +1875,7 @@ fn context_for(src: &Path, dest: &Path) -> String { } /// Implements a simple backup copy for the destination file . -/// if is_dest_symlink flag is set to true dest will be renamed to backup_path +/// if `is_dest_symlink` flag is set to true dest will be renamed to `backup_path` /// TODO: for the backup, should this function be replaced by `copy_file(...)`? fn backup_dest(dest: &Path, backup_path: &Path, is_dest_symlink: bool) -> CopyResult { if is_dest_symlink { diff --git a/src/uu/cp/src/platform/linux.rs b/src/uu/cp/src/platform/linux.rs index 56b8b2fe4..1095d4674 100644 --- a/src/uu/cp/src/platform/linux.rs +++ b/src/uu/cp/src/platform/linux.rs @@ -29,10 +29,10 @@ enum CloneFallback { /// Use [`std::fs::copy`]. FSCopy, - /// Use sparse_copy + /// Use [`sparse_copy`] SparseCopy, - /// Use sparse_copy_without_hole + /// Use [`sparse_copy_without_hole`] SparseCopyWithoutHole, } @@ -43,9 +43,9 @@ enum CopyMethod { SparseCopy, /// Use [`std::fs::copy`]. FSCopy, - /// Default (can either be sparse_copy or FSCopy) + /// Default (can either be [`CopyMethod::SparseCopy`] or [`CopyMethod::FSCopy`]) Default, - /// Use sparse_copy_without_hole + /// Use [`sparse_copy_without_hole`] SparseCopyWithoutHole, } @@ -124,8 +124,8 @@ fn check_sparse_detection(source: &Path) -> Result { Ok(false) } -/// Optimized sparse_copy, doesn't create holes for large sequences of zeros in non sparse_files -/// Used when --sparse=auto +/// Optimized [`sparse_copy`] doesn't create holes for large sequences of zeros in non `sparse_files` +/// Used when `--sparse=auto` #[cfg(any(target_os = "linux", target_os = "android"))] fn sparse_copy_without_hole
<P>
(source: P, dest: P) -> std::io::Result<()> where @@ -175,7 +175,7 @@ where Ok(()) } /// Perform a sparse copy from one file to another. -/// Creates a holes for large sequences of zeros in non_sparse_files, used for --sparse=always +/// Creates holes for large sequences of zeros in non-sparse files, used for `--sparse=always` #[cfg(any(target_os = "linux", target_os = "android"))] fn sparse_copy
<P>
(source: P, dest: P) -> std::io::Result<()> where @@ -470,7 +470,7 @@ fn handle_reflink_never_sparse_never(source: &Path) -> Result Result { let mut copy_debug = CopyDebug { offload: OffloadReflinkDebug::Unknown, diff --git a/src/uu/csplit/src/split_name.rs b/src/uu/csplit/src/split_name.rs index 925ded4cc..5ac91bf92 100644 --- a/src/uu/csplit/src/split_name.rs +++ b/src/uu/csplit/src/split_name.rs @@ -16,7 +16,7 @@ pub struct SplitName { } impl SplitName { - /// Creates a new SplitName with the given user-defined options: + /// Creates a new [`SplitName`] with the given user-defined options: /// - `prefix_opt` specifies a prefix for all splits. /// - `format_opt` specifies a custom format for the suffix part of the filename, using the /// `sprintf` format notation. diff --git a/src/uu/date/src/date.rs b/src/uu/date/src/date.rs index 88b89fdae..36c9b4553 100644 --- a/src/uu/date/src/date.rs +++ b/src/uu/date/src/date.rs @@ -476,8 +476,8 @@ fn set_system_datetime(date: Zoned) -> UResult<()> { #[cfg(windows)] /// System call to set date (Windows). /// See here for more: -/// https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-setsystemtime -/// https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-systemtime +/// * <https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-setsystemtime> +/// * <https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-systemtime> fn set_system_datetime(date: Zoned) -> UResult<()> { let system_time = SYSTEMTIME { wYear: date.year() as u16, diff --git a/src/uu/dd/src/numbers.rs b/src/uu/dd/src/numbers.rs index b66893d8d..206cd7887 100644 --- a/src/uu/dd/src/numbers.rs +++ b/src/uu/dd/src/numbers.rs @@ -37,7 +37,7 @@ const SI_BASES: [u128; 10] = [ const SI_SUFFIXES: [&str; 9] = ["B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"]; -/// A SuffixType determines whether the suffixes are 1000 or 1024 based. +/// A `SuffixType` determines whether the suffixes are 1000 or 1024 based. #[derive(Clone, Copy)] pub(crate) enum SuffixType { Iec, diff --git a/src/uu/dd/src/parseargs.rs b/src/uu/dd/src/parseargs.rs index 758654de9..193e205c1 100644 --- a/src/uu/dd/src/parseargs.rs +++ b/src/uu/dd/src/parseargs.rs @@ -446,7 +446,7 @@ fn show_zero_multiplier_warning() { ); } -/// Parse bytes using str::parse, then map error if needed. +/// Parse bytes using [`str::parse`], then map error if needed. fn parse_bytes_only(s: &str, i: usize) -> Result { s[..i] .parse() @@ -502,7 +502,7 @@ fn parse_bytes_no_x(full: &str, s: &str) -> Result { } /// Parse byte and multiplier like 512, 5KiB, or 1G. -/// Uses uucore::parse_size, and adds the 'w' and 'c' suffixes which are mentioned +/// Uses [`uucore::parser::parse_size`], and adds the 'w' and 'c' suffixes which are mentioned /// in dd's info page. pub fn parse_bytes_with_opt_multiplier(s: &str) -> Result { // TODO On my Linux system, there seems to be a maximum block size of 4096 bytes: diff --git a/src/uu/df/src/blocks.rs b/src/uu/df/src/blocks.rs index 26b763cac..57301ba5f 100644 --- a/src/uu/df/src/blocks.rs +++ b/src/uu/df/src/blocks.rs @@ -40,8 +40,8 @@ const SI_BASES: [u128; 10] = [ 1_000_000_000_000_000_000_000_000_000, ]; -/// A SuffixType determines whether the suffixes are 1000 or 1024 based, and whether they are -/// intended for HumanReadable mode or not. +/// A `SuffixType` determines whether the suffixes are 1000 or 1024 based, and whether they are +/// intended for `HumanReadable` mode or not. 
#[derive(Clone, Copy)] pub(crate) enum SuffixType { Iec, diff --git a/src/uu/df/src/filesystem.rs b/src/uu/df/src/filesystem.rs index 43b1deb36..ac9bbeaf0 100644 --- a/src/uu/df/src/filesystem.rs +++ b/src/uu/df/src/filesystem.rs @@ -48,7 +48,7 @@ pub(crate) enum FsError { /// Check whether `mount` has been over-mounted. /// /// `mount` is considered over-mounted if it there is an element in -/// `mounts` after mount that has the same mount_dir. +/// `mounts` after mount that has the same `mount_dir`. #[cfg(not(windows))] fn is_over_mounted(mounts: &[MountInfo], mount: &MountInfo) -> bool { let last_mount_for_dir = mounts diff --git a/src/uu/df/src/table.rs b/src/uu/df/src/table.rs index 6e79c3758..62b8445a4 100644 --- a/src/uu/df/src/table.rs +++ b/src/uu/df/src/table.rs @@ -297,7 +297,7 @@ impl<'a> RowFormatter<'a> { } } -/// A HeaderMode defines what header labels should be shown. +/// A `HeaderMode` defines what header labels should be shown. pub(crate) enum HeaderMode { Default, // the user used -h or -H diff --git a/src/uu/dircolors/src/dircolors.rs b/src/uu/dircolors/src/dircolors.rs index 01ad4cce1..9ee80140a 100644 --- a/src/uu/dircolors/src/dircolors.rs +++ b/src/uu/dircolors/src/dircolors.rs @@ -294,7 +294,7 @@ pub fn uu_app() -> Command { pub trait StrUtils { /// Remove comments and trim whitespace fn purify(&self) -> &Self; - /// Like split_whitespace() but only produce 2 components + /// Like `split_whitespace()` but only produce 2 parts fn split_two(&self) -> (&str, &str); fn fnmatch(&self, pattern: &str) -> bool; } diff --git a/src/uu/echo/src/echo.rs b/src/uu/echo/src/echo.rs index e0c542366..e378ecd00 100644 --- a/src/uu/echo/src/echo.rs +++ b/src/uu/echo/src/echo.rs @@ -69,8 +69,8 @@ fn is_echo_flag(arg: &OsString, echo_options: &mut EchoOptions) -> bool { /// Processes command line arguments, separating flags from normal arguments /// Returns: /// - Vector of non-flag arguments -/// - trailing_newline: whether to print a trailing newline -/// - escape: whether to process escape sequences +/// - `trailing_newline`: whether to print a trailing newline +/// - `escape`: whether to process escape sequences fn filter_echo_flags(args: impl uucore::Args) -> (Vec, bool, bool) { let mut result = Vec::new(); let mut echo_options = EchoOptions { diff --git a/src/uu/env/src/split_iterator.rs b/src/uu/env/src/split_iterator.rs index 40e5e9dae..546a73038 100644 --- a/src/uu/env/src/split_iterator.rs +++ b/src/uu/env/src/split_iterator.rs @@ -7,9 +7,9 @@ // licensed under the Apache License, Version 2.0 // or the MIT license , at your option. // -//! Process command line according to parsing rules of original GNU env. +//! Process command line according to parsing rules of the original GNU env. //! Even though it looks quite like a POSIX syntax, the original -//! "shell_words" implementation had to be adapted significantly. +//! `shell_words` implementation had to be adapted significantly. //! //! Apart from the grammar differences, there is a new feature integrated: $VARIABLE expansion. //! diff --git a/src/uu/env/src/string_expander.rs b/src/uu/env/src/string_expander.rs index 48f98e1fe..777420561 100644 --- a/src/uu/env/src/string_expander.rs +++ b/src/uu/env/src/string_expander.rs @@ -16,8 +16,8 @@ use crate::{ /// This class makes parsing and word collection more convenient. /// /// It manages an "output" buffer that is automatically filled. -/// It provides "skip_one" and "take_one" that focus on -/// working with ASCII separators. 
Thus they will skip or take +/// It provides `skip_one` and `take_one` that focus on +/// working with ASCII separators. Thus, they will skip or take /// all consecutive non-ascii char sequences at once. pub struct StringExpander<'a> { parser: StringParser<'a>, diff --git a/src/uu/env/src/string_parser.rs b/src/uu/env/src/string_parser.rs index 84eb346fa..155eb4781 100644 --- a/src/uu/env/src/string_parser.rs +++ b/src/uu/env/src/string_parser.rs @@ -25,7 +25,7 @@ pub enum ErrorType { InternalError, } -/// Provides a valid char or a invalid sequence of bytes. +/// Provides a valid char or an invalid sequence of bytes. /// /// Invalid byte sequences can't be split in any meaningful way. /// Thus, they need to be consumed as one piece. @@ -34,9 +34,9 @@ pub enum Chunk<'a> { ValidSingleIntChar((char, NativeCharInt)), } -/// This class makes parsing a OsString char by char more convenient. +/// This class makes parsing a [`std::ffi::OsString`] char by char more convenient. /// -/// It also allows to capturing of intermediate positions for later splitting. +/// It also allows capturing the intermediate positions for later splitting. pub struct StringParser<'a> { input: &'a NativeIntStr, pointer: usize, diff --git a/src/uu/expr/src/syntax_tree.rs b/src/uu/expr/src/syntax_tree.rs index 673f3b43e..b0ae0142f 100644 --- a/src/uu/expr/src/syntax_tree.rs +++ b/src/uu/expr/src/syntax_tree.rs @@ -331,7 +331,7 @@ where /// /// This method is not comprehensively checking all cases in which /// a regular expression could be invalid; any cases not caught will -/// result in a [ExprError::InvalidRegexExpression] when passing the +/// result in a [`ExprError::InvalidRegexExpression`] when passing the /// regular expression through the Oniguruma bindings. This method is /// intended to just identify a few situations for which GNU coreutils /// has specific error messages. diff --git a/src/uu/fmt/src/parasplit.rs b/src/uu/fmt/src/parasplit.rs index dc8d98f6c..5911a8e47 100644 --- a/src/uu/fmt/src/parasplit.rs +++ b/src/uu/fmt/src/parasplit.rs @@ -207,10 +207,10 @@ impl Iterator for FileLines<'_> { } } -/// A paragraph : a collection of FileLines that are to be formatted +/// A paragraph : a collection of [`FileLines`] that are to be formatted /// plus info about the paragraph's indentation /// -/// We only retain the String from the FileLine; the other info +/// We only retain the String from the [`FileLine`]; the other info /// is only there to help us in deciding how to merge lines into Paragraphs #[derive(Debug)] pub struct Paragraph { diff --git a/src/uu/hashsum/src/hashsum.rs b/src/uu/hashsum/src/hashsum.rs index 308360bee..1e215014a 100644 --- a/src/uu/hashsum/src/hashsum.rs +++ b/src/uu/hashsum/src/hashsum.rs @@ -57,7 +57,7 @@ struct Options<'a> { /// /// # Returns /// -/// Returns a UResult of a tuple containing the algorithm name, the hasher instance, and +/// Returns a [`UResult`] of a tuple containing the algorithm name, the hasher instance, and /// the output length in bits or an Err if multiple hash algorithms are specified or if a /// required flag is missing. #[allow(clippy::cognitive_complexity)] diff --git a/src/uu/head/src/take.rs b/src/uu/head/src/take.rs index de8831cc9..57a7e887f 100644 --- a/src/uu/head/src/take.rs +++ b/src/uu/head/src/take.rs @@ -82,10 +82,10 @@ impl TakeAllBuffer { /// copied. /// /// Algorithm for this function is as follows... -/// 1 - Chunks of the input file are read into a queue of TakeAllBuffer instances. 
+/// 1 - Chunks of the input file are read into a queue of [`TakeAllBuffer`] instances. /// Chunks are read until at least we have enough data to write out the entire contents of the -/// first TakeAllBuffer in the queue whilst still retaining at least `n` bytes in the queue. -/// If we hit EoF at any point, stop reading. +/// first [`TakeAllBuffer`] in the queue whilst still retaining at least `n` bytes in the queue. +/// If we hit `EoF` at any point, stop reading. /// 2 - Assess whether we managed to queue up greater-than `n` bytes. If not, we must be done, in /// which case break and return. /// 3 - Write either the full first buffer of data, or just enough bytes to get back down to having @@ -233,19 +233,19 @@ impl TakeAllLinesBuffer { /// copied. /// /// Algorithm for this function is as follows... -/// 1 - Chunks of the input file are read into a queue of TakeAllLinesBuffer instances. +/// 1 - Chunks of the input file are read into a queue of [`TakeAllLinesBuffer`] instances. /// Chunks are read until at least we have enough lines that we can write out the entire -/// contents of the first TakeAllLinesBuffer in the queue whilst still retaining at least +/// contents of the first [`TakeAllLinesBuffer`] in the queue whilst still retaining at least /// `n` lines in the queue. -/// If we hit EoF at any point, stop reading. +/// If we hit `EoF` at any point, stop reading. /// 2 - Asses whether we managed to queue up greater-than `n` lines. If not, we must be done, in /// which case break and return. /// 3 - Write either the full first buffer of data, or just enough lines to get back down to /// having the required `n` lines of data queued. /// 4 - Go back to (1). /// -/// Note that lines will regularly straddle multiple TakeAllLinesBuffer instances. The partial_line -/// flag on TakeAllLinesBuffer tracks this, and we use that to ensure that we write out enough +/// Note that lines will regularly straddle multiple [`TakeAllLinesBuffer`] instances. The `partial_line` +/// flag on [`TakeAllLinesBuffer`] tracks this, and we use that to ensure that we write out enough /// lines in the case that the input file doesn't end with a `separator` character. pub fn copy_all_but_n_lines( mut reader: R, diff --git a/src/uu/install/src/install.rs b/src/uu/install/src/install.rs index d8d7bc5e3..9cb3e4f70 100644 --- a/src/uu/install/src/install.rs +++ b/src/uu/install/src/install.rs @@ -655,8 +655,8 @@ fn standard(mut paths: Vec, b: &Behavior) -> UResult<()> { /// /// # Parameters /// -/// _files_ must all exist as non-directories. -/// _target_dir_ must be a directory. +/// `files` must all exist as non-directories. +/// `target_dir` must be a directory. /// fn copy_files_into_dir(files: &[PathBuf], target_dir: &Path, b: &Behavior) -> UResult<()> { if !target_dir.is_dir() { @@ -768,7 +768,7 @@ fn perform_backup(to: &Path, b: &Behavior) -> UResult> { } } -/// Copy a non-special file using std::fs::copy. +/// Copy a non-special file using [`fs::copy`]. /// /// # Parameters /// * `from` - The source file path. 
diff --git a/src/uu/ls/src/colors.rs b/src/uu/ls/src/colors.rs index ab19672ea..4affbbf8c 100644 --- a/src/uu/ls/src/colors.rs +++ b/src/uu/ls/src/colors.rs @@ -147,9 +147,8 @@ impl<'a> StyleManager<'a> { } /// Colors the provided name based on the style determined for the given path -/// This function is quite long because it tries to leverage DirEntry to avoid -/// unnecessary calls to stat() -/// and manages the symlink errors +/// This function is quite long because it tries to leverage [`DirEntry`] to avoid +/// unnecessary calls to stat and manages the symlink errors pub(crate) fn color_name( name: OsString, path: &PathData, diff --git a/src/uu/ls/src/ls.rs b/src/uu/ls/src/ls.rs index 572daa61e..d07ee2997 100644 --- a/src/uu/ls/src/ls.rs +++ b/src/uu/ls/src/ls.rs @@ -77,6 +77,7 @@ use uucore::{parser::parse_glob, show, show_error, show_warning}; mod dired; use dired::{DiredOutput, is_dired_arg_present}; mod colors; +use crate::options::QUOTING_STYLE; use colors::{StyleManager, color_name}; pub mod options { @@ -583,12 +584,12 @@ fn extract_hyperlink(options: &clap::ArgMatches) -> bool { } } -/// Match the argument given to --quoting-style or the QUOTING_STYLE env variable. +/// Match the argument given to --quoting-style or the [`QUOTING_STYLE`] env variable. /// /// # Arguments /// /// * `style`: the actual argument string -/// * `show_control` - A boolean value representing whether or not to show control characters. +/// * `show_control` - A boolean value representing whether to show control characters. /// /// # Returns /// @@ -609,18 +610,18 @@ fn match_quoting_style_name(style: &str, show_control: bool) -> Option QuotingStyle { - let opt_quoting_style = options.get_one::(options::QUOTING_STYLE); + let opt_quoting_style = options.get_one::(QUOTING_STYLE); if let Some(style) = opt_quoting_style { match match_quoting_style_name(style, show_control) { @@ -670,7 +671,7 @@ fn extract_quoting_style(options: &clap::ArgMatches, show_control: bool) -> Quot /// /// # Returns /// -/// An IndicatorStyle variant representing the indicator style to use. +/// An [`IndicatorStyle`] variant representing the indicator style to use. 
fn extract_indicator_style(options: &clap::ArgMatches) -> IndicatorStyle { if let Some(field) = options.get_one::(options::INDICATOR_STYLE) { match field.as_str() { @@ -998,7 +999,7 @@ impl Config { let zero_colors_opts = [options::COLOR]; let zero_show_control_opts = [options::HIDE_CONTROL_CHARS, options::SHOW_CONTROL_CHARS]; let zero_quoting_style_opts = [ - options::QUOTING_STYLE, + QUOTING_STYLE, options::quoting::C, options::quoting::ESCAPE, options::quoting::LITERAL, @@ -1330,8 +1331,8 @@ pub fn uu_app() -> Command { ) // Quoting style .arg( - Arg::new(options::QUOTING_STYLE) - .long(options::QUOTING_STYLE) + Arg::new(QUOTING_STYLE) + .long(QUOTING_STYLE) .help(get_message("ls-help-set-quoting-style")) .value_parser(ShortcutValueParser::new([ PossibleValue::new("literal"), @@ -1343,7 +1344,7 @@ pub fn uu_app() -> Command { PossibleValue::new("escape"), ])) .overrides_with_all([ - options::QUOTING_STYLE, + QUOTING_STYLE, options::quoting::LITERAL, options::quoting::ESCAPE, options::quoting::C, @@ -1356,7 +1357,7 @@ pub fn uu_app() -> Command { .alias("l") .help(get_message("ls-help-literal-quoting-style")) .overrides_with_all([ - options::QUOTING_STYLE, + QUOTING_STYLE, options::quoting::LITERAL, options::quoting::ESCAPE, options::quoting::C, @@ -1369,7 +1370,7 @@ pub fn uu_app() -> Command { .long(options::quoting::ESCAPE) .help(get_message("ls-help-escape-quoting-style")) .overrides_with_all([ - options::QUOTING_STYLE, + QUOTING_STYLE, options::quoting::LITERAL, options::quoting::ESCAPE, options::quoting::C, @@ -1382,7 +1383,7 @@ pub fn uu_app() -> Command { .long(options::quoting::C) .help(get_message("ls-help-c-quoting-style")) .overrides_with_all([ - options::QUOTING_STYLE, + QUOTING_STYLE, options::quoting::LITERAL, options::quoting::ESCAPE, options::quoting::C, @@ -2703,7 +2704,7 @@ fn display_grid( Ok(()) } -/// This writes to the BufWriter state.out a single string of the output of `ls -l`. +/// This writes to the [`BufWriter`] `state.out` a single string of the output of `ls -l`. /// /// It writes the following keys, in order: /// * `inode` ([`get_inode`], config-optional) @@ -2717,8 +2718,8 @@ fn display_grid( /// * `item_name` ([`display_item_name`]) /// /// This function needs to display information in columns: -/// * permissions and system_time are already guaranteed to be pre-formatted in fixed length. -/// * item_name is the last column and is left-aligned. +/// * permissions and `system_time` are already guaranteed to be pre-formatted in fixed length. +/// * `item_name` is the last column and is left-aligned. /// * Everything else needs to be padded using [`pad_left`]. /// /// That's why we have the parameters: diff --git a/src/uu/mkdir/src/mkdir.rs b/src/uu/mkdir/src/mkdir.rs index 7d41e4a14..4e46460f2 100644 --- a/src/uu/mkdir/src/mkdir.rs +++ b/src/uu/mkdir/src/mkdir.rs @@ -42,10 +42,10 @@ pub struct Config<'a> { /// Print message for each created directory. pub verbose: bool, - /// Set SELinux security context. + /// Set `SELinux` security context. pub set_selinux_context: bool, - /// Specific SELinux context. + /// Specific `SELinux` context. pub context: Option<&'a String>, } diff --git a/src/uu/mknod/src/mknod.rs b/src/uu/mknod/src/mknod.rs index 55a7bba1b..ea690860c 100644 --- a/src/uu/mknod/src/mknod.rs +++ b/src/uu/mknod/src/mknod.rs @@ -56,10 +56,10 @@ pub struct Config<'a> { pub dev: dev_t, - /// Set SELinux security context. + /// Set `SELinux` security context. pub set_selinux_context: bool, - /// Specific SELinux context. 
+ /// Specific `SELinux` context. pub context: Option<&'a String>, } diff --git a/src/uu/od/src/inputoffset.rs b/src/uu/od/src/inputoffset.rs index cb5da6639..9cef72e97 100644 --- a/src/uu/od/src/inputoffset.rs +++ b/src/uu/od/src/inputoffset.rs @@ -12,7 +12,7 @@ pub enum Radix { /// provides the byte offset printed at the left margin pub struct InputOffset { - /// The radix to print the byte offset. NoPrefix will not print a byte offset. + /// The radix to print the byte offset. [`Radix::NoPrefix`] will not print a byte offset. radix: Radix, /// The current position. Initialize at `new`, increase using `increase_position`. byte_pos: u64, diff --git a/src/uu/od/src/mockstream.rs b/src/uu/od/src/mockstream.rs index 9904fa9c1..8cfb26bc9 100644 --- a/src/uu/od/src/mockstream.rs +++ b/src/uu/od/src/mockstream.rs @@ -54,7 +54,7 @@ pub struct FailingMockStream { } impl FailingMockStream { - /// Creates a FailingMockStream + /// Creates a [`FailingMockStream`] /// /// When `read` or `write` is called, it will return an error `repeat_count` times. /// `kind` and `message` can be specified to define the exact error. diff --git a/src/uu/shred/src/shred.rs b/src/uu/shred/src/shred.rs index 493660fb2..71810368e 100644 --- a/src/uu/shred/src/shred.rs +++ b/src/uu/shred/src/shred.rs @@ -102,7 +102,7 @@ enum RemoveMethod { WipeSync, // The same as 'Wipe' sync the file name changes } -/// Iterates over all possible filenames of a certain length using NAME_CHARSET as an alphabet +/// Iterates over all possible filenames of a certain length using [`NAME_CHARSET`] as an alphabet struct FilenameIter { // Store the indices of the letters of our filename in NAME_CHARSET name_charset_indices: Vec, @@ -156,7 +156,7 @@ enum RandomSource { Read(File), } -/// Used to generate blocks of bytes of size <= BLOCK_SIZE based on either a give pattern +/// Used to generate blocks of bytes of size <= [`BLOCK_SIZE`] based on either a give pattern /// or randomness // The lint warns about a large difference because StdRng is big, but the buffers are much // larger anyway, so it's fine. @@ -170,7 +170,7 @@ enum BytesWriter<'a> { rng_file: &'a File, buffer: [u8; BLOCK_SIZE], }, - // To write patterns we only write to the buffer once. To be able to do + // To write patterns, we only write to the buffer once. To be able to do // this, we need to extend the buffer with 2 bytes. We can then easily // obtain a buffer starting with any character of the pattern that we // want with an offset of either 0, 1 or 2. @@ -178,7 +178,7 @@ enum BytesWriter<'a> { // For example, if we have the pattern ABC, but we want to write a block // of BLOCK_SIZE starting with B, we just pick the slice [1..BLOCK_SIZE+1] // This means that we only have to fill the buffer once and can just reuse - // it afterwards. + // it afterward. Pattern { offset: usize, buffer: [u8; PATTERN_BUFFER_SIZE], diff --git a/src/uu/sort/src/numeric_str_cmp.rs b/src/uu/sort/src/numeric_str_cmp.rs index d3d04a348..4484d21c1 100644 --- a/src/uu/sort/src/numeric_str_cmp.rs +++ b/src/uu/sort/src/numeric_str_cmp.rs @@ -5,12 +5,12 @@ //! Fast comparison for strings representing a base 10 number without precision loss. //! -//! To be able to short-circuit when comparing, [NumInfo] must be passed along with each number -//! to [numeric_str_cmp]. [NumInfo] is generally obtained by calling [NumInfo::parse] and should be cached. -//! It is allowed to arbitrarily modify the exponent afterwards, which is equivalent to shifting the decimal point. +//! 
To be able to short-circuit when comparing, [`NumInfo`] must be passed along with each number +//! to [`numeric_str_cmp`]. [`NumInfo`] is generally obtained by calling [`NumInfo::parse`] and should be cached. +//! It is allowed to arbitrarily modify the exponent afterward, which is equivalent to shifting the decimal point. //! -//! More specifically, exponent can be understood so that the original number is in (1..10)*10^exponent. -//! From that follows the constraints of this algorithm: It is able to compare numbers in ±(1*10^[i64::MIN]..10*10^[i64::MAX]). +//! More specifically, exponent can be understood so that the original number is in `(1..10)*10^exponent`. +//! From that follows the constraints of this algorithm: It is able to compare numbers in ±(1*10^[`i64::MIN`]..10*10^[`i64::MAX`]). use std::{cmp::Ordering, ops::Range}; @@ -43,8 +43,8 @@ impl Default for NumInfoParseSettings { } impl NumInfo { - /// Parse NumInfo for this number. - /// Also returns the range of num that should be passed to numeric_str_cmp later. + /// Parse [`NumInfo`] for this number. + /// Also returns the range of num that should be passed to [`numeric_str_cmp`] later. /// /// Leading zeros will be excluded from the returned range. If the number consists of only zeros, /// an empty range (idx..idx) is returned so that idx is the char after the last zero. @@ -213,7 +213,7 @@ pub fn human_numeric_str_cmp( } /// Compare two numbers as strings without parsing them as a number first. This should be more performant and can handle numbers more precisely. -/// NumInfo is needed to provide a fast path for most numbers. +/// [`NumInfo`] is needed to provide a fast path for most numbers. #[inline(always)] pub fn numeric_str_cmp((a, a_info): (&str, &NumInfo), (b, b_info): (&str, &NumInfo)) -> Ordering { // check for a difference in the sign diff --git a/src/uu/sort/src/sort.rs b/src/uu/sort/src/sort.rs index 9b956d3a8..d7f3f5a26 100644 --- a/src/uu/sort/src/sort.rs +++ b/src/uu/sort/src/sort.rs @@ -933,7 +933,7 @@ impl FieldSelector { } /// Get the selection that corresponds to this selector for the line. - /// If needs_fields returned false, tokens may be empty. + /// If `needs_fields` returned false, tokens may be empty. fn get_selection<'a>(&self, line: &'a str, tokens: &[Field]) -> Selection<'a> { // `get_range` expects `None` when we don't need tokens and would get confused by an empty vector. let tokens = if self.needs_tokens { @@ -964,7 +964,7 @@ impl FieldSelector { } /// Look up the range in the line that corresponds to this selector. - /// If needs_fields returned false, tokens must be None. + /// If `needs_fields` returned false, tokens must be None. fn get_range(&self, line: &str, tokens: Option<&[Field]>) -> Range { enum Resolution { // The start index of the resolved character, inclusive @@ -1878,8 +1878,8 @@ pub enum GeneralBigDecimalParseResult { Infinity, } -/// Parse the beginning string into a GeneralBigDecimalParseResult. -/// Using a GeneralBigDecimalParseResult instead of ExtendedBigDecimal is necessary to correctly order floats. +/// Parse the beginning string into a [`GeneralBigDecimalParseResult`]. +/// Using a [`GeneralBigDecimalParseResult`] instead of [`ExtendedBigDecimal`] is necessary to correctly order floats. #[inline(always)] fn general_bd_parse(a: &str) -> GeneralBigDecimalParseResult { // Parse digits, and fold in recoverable errors @@ -1946,7 +1946,7 @@ enum Month { December, } -/// Parse the beginning string into a Month, returning Month::Unknown on errors. 
+/// Parse the beginning string into a Month, returning [`Month::Unknown`] on errors. fn month_parse(line: &str) -> Month { let line = line.trim(); diff --git a/src/uu/sort/src/tmp_dir.rs b/src/uu/sort/src/tmp_dir.rs index 1ed0268ec..009eef459 100644 --- a/src/uu/sort/src/tmp_dir.rs +++ b/src/uu/sort/src/tmp_dir.rs @@ -18,7 +18,7 @@ use uucore::{ use crate::SortError; -/// A wrapper around TempDir that may only exist once in a process. +/// A wrapper around [`TempDir`] that may only exist once in a process. /// /// `TmpDirWrapper` handles the allocation of new temporary files in this temporary directory and /// deleting the whole directory when `SIGINT` is received. Creating a second `TmpDirWrapper` will diff --git a/src/uu/split/src/platform/unix.rs b/src/uu/split/src/platform/unix.rs index 4b2468d92..f883f663c 100644 --- a/src/uu/split/src/platform/unix.rs +++ b/src/uu/split/src/platform/unix.rs @@ -14,7 +14,7 @@ use uucore::fs::FileInformation; use uucore::locale::get_message_with_args; use uucore::show; -/// A writer that writes to a shell_process' stdin +/// A writer that writes to a `shell_process`' stdin /// /// We use a shell process (not directly calling a sub-process) so we can forward the name of the /// corresponding output file (xaa, xab, xac… ). This is the way it was implemented in GNU split. diff --git a/src/uu/split/src/split.rs b/src/uu/split/src/split.rs index 1a8eb5f4a..eea97ed97 100644 --- a/src/uu/split/src/split.rs +++ b/src/uu/split/src/split.rs @@ -539,7 +539,7 @@ impl Settings { } /// When using `--filter` option, writing to child command process stdin -/// could fail with BrokenPipe error +/// could fail with [`ErrorKind::BrokenPipe`] error /// It can be safely ignored fn ignorable_io_error(error: &io::Error, settings: &Settings) -> bool { error.kind() == ErrorKind::BrokenPipe && settings.filter.is_some() @@ -560,7 +560,7 @@ fn custom_write(bytes: &[u8], writer: &mut T, settings: &Settings) -> /// Custom wrapper for `write_all()` method /// Similar to [`custom_write`], but returns true or false -/// depending on if `--filter` stdin is still open (no BrokenPipe error) +/// depending on if `--filter` stdin is still open (no [`ErrorKind::BrokenPipe`] error) /// Should not be used for Kth chunk number sub-strategies /// as those do not work with `--filter` option fn custom_write_all( @@ -923,7 +923,7 @@ trait ManageOutFiles { settings: &Settings, ) -> UResult<&mut BufWriter>>; /// Initialize a new set of output files - /// Each OutFile is generated with filename, while the writer for it could be + /// Each [`OutFile`] is generated with filename, while the writer for it could be /// optional, to be instantiated later by the calling function as needed. /// Optional writers could happen in the following situations: /// * in [`n_chunks_by_line`] and [`n_chunks_by_line_round_robin`] if `elide_empty_files` parameter is set to `true` diff --git a/src/uu/stat/src/stat.rs b/src/uu/stat/src/stat.rs index e109ee091..6026c98e4 100644 --- a/src/uu/stat/src/stat.rs +++ b/src/uu/stat/src/stat.rs @@ -268,7 +268,7 @@ struct Stater { /// /// # Arguments /// -/// * `output` - A reference to the OutputType enum containing the value to be printed. +/// * `output` - A reference to the [`OutputType`] enum containing the value to be printed. /// * `flags` - A Flags struct containing formatting flags. /// * `width` - The width of the field for the printed output. /// * `precision` - How many digits of precision, if any. 
diff --git a/src/uu/tail/src/chunks.rs b/src/uu/tail/src/chunks.rs index 0811b4306..bf35d6401 100644 --- a/src/uu/tail/src/chunks.rs +++ b/src/uu/tail/src/chunks.rs @@ -19,7 +19,7 @@ use uucore::error::UResult; /// block read at a time. pub const BLOCK_SIZE: u64 = 1 << 16; -/// The size of the backing buffer of a LinesChunk or BytesChunk in bytes. The value of BUFFER_SIZE +/// The size of the backing buffer of a [`LinesChunk`] or [`BytesChunk`] in bytes. The value of `BUFFER_SIZE` /// originates from the BUFSIZ constant in stdio.h and the libc crate to make stream IO efficient. /// In the latter the value is constantly set to 8192 on all platforms, where the value in stdio.h /// is determined on each platform differently. Since libc chose 8192 as a reasonable default the @@ -115,8 +115,8 @@ pub struct BytesChunk { /// [`BytesChunk::fill`] buffer: ChunkBuffer, - /// Stores the number of bytes, this buffer holds. This is not equal to buffer.len(), since the - /// [`BytesChunk`] may store less bytes than the internal buffer can hold. In addition + /// Stores the number of bytes, this buffer holds. This is not equal to `buffer.len()`, since the + /// [`BytesChunk`] may store less bytes than the internal buffer can hold. In addition, /// [`BytesChunk`] may be reused, what makes it necessary to track the number of stored bytes. /// The choice of usize is sufficient here, since the number of bytes max value is /// [`BUFFER_SIZE`], which is a usize. diff --git a/src/uu/tail/src/follow/files.rs b/src/uu/tail/src/follow/files.rs index 0fcf90e2a..8043201ef 100644 --- a/src/uu/tail/src/follow/files.rs +++ b/src/uu/tail/src/follow/files.rs @@ -18,9 +18,9 @@ use uucore::error::UResult; /// Data structure to keep a handle on files to follow. /// `last` always holds the path/key of the last file that was printed from. -/// The keys of the HashMap can point to an existing file path (normal case), -/// or stdin ("-"), or to a non existing path (--retry). -/// For existing files, all keys in the HashMap are absolute Paths. +/// The keys of the [`HashMap`] can point to an existing file path (normal case), +/// or stdin ("-"), or to a non-existing path (--retry). +/// For existing files, all keys in the [`HashMap`] are absolute Paths. pub struct FileHandling { map: HashMap, last: Option, @@ -36,7 +36,7 @@ impl FileHandling { } } - /// Wrapper for HashMap::insert using Path::canonicalize + /// Wrapper for [`HashMap::insert`] using [`Path::canonicalize`] pub fn insert(&mut self, k: &Path, v: PathData, update_last: bool) { let k = Self::canonicalize_path(k); if update_last { @@ -45,17 +45,17 @@ impl FileHandling { let _ = self.map.insert(k, v); } - /// Wrapper for HashMap::remove using Path::canonicalize + /// Wrapper for [`HashMap::remove`] using [`Path::canonicalize`] pub fn remove(&mut self, k: &Path) -> PathData { self.map.remove(&Self::canonicalize_path(k)).unwrap() } - /// Wrapper for HashMap::get using Path::canonicalize + /// Wrapper for [`HashMap::get`] using [`Path::canonicalize`] pub fn get(&self, k: &Path) -> &PathData { self.map.get(&Self::canonicalize_path(k)).unwrap() } - /// Wrapper for HashMap::get_mut using Path::canonicalize + /// Wrapper for [`HashMap::get_mut`] using [`Path::canonicalize`] pub fn get_mut(&mut self, k: &Path) -> &mut PathData { self.map.get_mut(&Self::canonicalize_path(k)).unwrap() } @@ -115,8 +115,8 @@ impl FileHandling { pub fn update_reader(&mut self, path: &Path) -> UResult<()> { /* BUG: If it's not necessary to reopen a file, GNU's tail calls seek to offset 0. 
- However we can't call seek here because `BufRead` does not implement `Seek`. - As a workaround we always reopen the file even though this might not always + However, we can't call seek here because `BufRead` does not implement `Seek`. + As a workaround, we always reopen the file even though this might not always be necessary. */ self.get_mut(path) @@ -172,8 +172,8 @@ impl FileHandling { } } -/// Data structure to keep a handle on the BufReader, Metadata -/// and the display_name (header_name) of files that are being followed. +/// Data structure to keep a handle on the [`BufReader`], [`Metadata`] +/// and the `display_name` (`header_name`) of files that are being followed. pub struct PathData { pub reader: Option>, pub metadata: Option, diff --git a/src/uu/test/src/error.rs b/src/uu/test/src/error.rs index fd66641a9..4bc062fc4 100644 --- a/src/uu/test/src/error.rs +++ b/src/uu/test/src/error.rs @@ -29,7 +29,7 @@ pub enum ParseError { /// A Result type for parsing test expressions pub type ParseResult = Result; -/// Implement UError trait for ParseError to make it easier to return useful error codes from main(). +/// Implement `UError` trait for `ParseError` to make it easier to return useful error codes from `main()`. impl uucore::error::UError for ParseError { fn code(&self) -> i32 { 2 diff --git a/src/uu/test/src/parser.rs b/src/uu/test/src/parser.rs index 417de3380..167bf7702 100644 --- a/src/uu/test/src/parser.rs +++ b/src/uu/test/src/parser.rs @@ -40,9 +40,9 @@ pub enum Symbol { } impl Symbol { - /// Create a new Symbol from an OsString. + /// Create a new Symbol from an [`OsString`]. /// - /// Returns Symbol::None in place of None + /// Returns `Symbol::None` in place of None fn new(token: Option) -> Self { match token { Some(s) => match s.to_str() { @@ -66,13 +66,13 @@ impl Symbol { } } - /// Convert this Symbol into a Symbol::Literal, useful for cases where + /// Convert this Symbol into a [`Symbol::Literal`], useful for cases where /// test treats an operator as a string operand (test has no reserved /// words). /// /// # Panics /// - /// Panics if `self` is Symbol::None + /// Panics if `self` is [`Symbol::None`] fn into_literal(self) -> Self { Self::Literal(match self { Self::LParen => OsString::from("("), @@ -106,7 +106,7 @@ impl std::fmt::Display for Symbol { } } -/// Recursive descent parser for test, which converts a list of OsStrings +/// Recursive descent parser for test, which converts a list of [`OsString`]s /// (typically command line arguments) into a stack of Symbols in postfix /// order. 
/// diff --git a/src/uu/touch/src/error.rs b/src/uu/touch/src/error.rs index 84d5112f7..13c3b3638 100644 --- a/src/uu/touch/src/error.rs +++ b/src/uu/touch/src/error.rs @@ -17,7 +17,7 @@ pub enum TouchError { #[error("{}", get_message_with_args("touch-error-unable-to-parse-date", HashMap::from([("date".to_string(), .0.clone())])))] InvalidDateFormat(String), - /// The source time couldn't be converted to a [chrono::DateTime] + /// The source time couldn't be converted to a [`chrono::DateTime`] #[error("{}", get_message_with_args("touch-error-invalid-filetime", HashMap::from([("time".to_string(), .0.to_string())])))] InvalidFiletime(FileTime), diff --git a/src/uu/touch/src/touch.rs b/src/uu/touch/src/touch.rs index 77a7e05a6..701edb1f8 100644 --- a/src/uu/touch/src/touch.rs +++ b/src/uu/touch/src/touch.rs @@ -123,9 +123,9 @@ mod format { pub(crate) const YYYYMMDDHHMM_OFFSET: &str = "%Y-%m-%d %H:%M %z"; } -/// Convert a DateTime with a TZ offset into a FileTime +/// Convert a [`DateTime`] with a TZ offset into a [`FileTime`] /// -/// The DateTime is converted into a unix timestamp from which the FileTime is +/// The [`DateTime`] is converted into a unix timestamp from which the [`FileTime`] is /// constructed. fn datetime_to_filetime(dt: &DateTime) -> FileTime { FileTime::from_unix_time(dt.timestamp(), dt.timestamp_subsec_nanos()) @@ -693,9 +693,9 @@ fn prepend_century(s: &str) -> UResult { )) } -/// Parses a timestamp string into a FileTime. +/// Parses a timestamp string into a [`FileTime`]. /// -/// This function attempts to parse a string into a FileTime +/// This function attempts to parse a string into a [`FileTime`] /// As expected by gnu touch -t : `[[cc]yy]mmddhhmm[.ss]` /// /// Note that If the year is specified with only two digits, @@ -772,9 +772,9 @@ fn parse_timestamp(s: &str) -> UResult { } // TODO: this may be a good candidate to put in fsext.rs -/// Returns a PathBuf to stdout. +/// Returns a [`PathBuf`] to stdout. /// -/// On Windows, uses GetFinalPathNameByHandleW to attempt to get the path +/// On Windows, uses `GetFinalPathNameByHandleW` to attempt to get the path /// from the stdout handle. fn pathbuf_from_stdout() -> Result { #[cfg(all(unix, not(target_os = "android")))] diff --git a/src/uu/tr/src/operation.rs b/src/uu/tr/src/operation.rs index dbade6b46..adb56bce5 100644 --- a/src/uu/tr/src/operation.rs +++ b/src/uu/tr/src/operation.rs @@ -581,7 +581,7 @@ impl Sequence { pub trait SymbolTranslator { fn translate(&mut self, current: u8) -> Option; - /// Takes two SymbolTranslators and creates a new SymbolTranslator over both in sequence. + /// Takes two [`SymbolTranslator`]s and creates a new [`SymbolTranslator`] over both in sequence. /// /// This behaves pretty much identical to [`Iterator::chain`]. fn chain(self, other: T) -> ChainedSymbolTranslator diff --git a/src/uu/uniq/src/uniq.rs b/src/uu/uniq/src/uniq.rs index f2caa9765..c218d8981 100644 --- a/src/uu/uniq/src/uniq.rs +++ b/src/uu/uniq/src/uniq.rs @@ -512,7 +512,7 @@ fn handle_extract_obs_skip_chars( } } -/// Maps Clap errors to USimpleError and overrides 3 specific ones +/// Maps Clap errors to [`USimpleError`] and overrides 3 specific ones /// to meet requirements of GNU tests for `uniq`. 
/// Unfortunately these overrides are necessary, since several GNU tests /// for `uniq` hardcode and require the exact wording of the error message diff --git a/src/uu/wc/src/count_fast.rs b/src/uu/wc/src/count_fast.rs index d70ba9f7c..c85fd5d80 100644 --- a/src/uu/wc/src/count_fast.rs +++ b/src/uu/wc/src/count_fast.rs @@ -198,7 +198,7 @@ pub(crate) fn count_bytes_fast(handle: &mut T) -> (usize, Opti } } -/// A simple structure used to align a BUF_SIZE buffer to 32-byte boundary. +/// A simple structure used to align a [`BUF_SIZE`] buffer to 32-byte boundary. /// /// This is useful as bytecount uses 256-bit wide vector operations that run much /// faster on aligned data (at least on x86 with AVX2 support). @@ -215,7 +215,7 @@ impl Default for AlignedBuffer { } } -/// Returns a WordCount that counts the number of bytes, lines, and/or the number of Unicode characters encoded in UTF-8 read via a Reader. +/// Returns a [`WordCount`] that counts the number of bytes, lines, and/or the number of Unicode characters encoded in UTF-8 read via a Reader. /// /// This corresponds to the `-c`, `-l` and `-m` command line flags to wc. /// diff --git a/src/uu/wc/src/utf8/mod.rs b/src/uu/wc/src/utf8/mod.rs index ea4f19392..5407a06fa 100644 --- a/src/uu/wc/src/utf8/mod.rs +++ b/src/uu/wc/src/utf8/mod.rs @@ -14,9 +14,9 @@ use std::str; /// Incremental, zero-copy UTF-8 decoding with error handling /// /// The original implementation was written by Simon Sapin in the utf-8 crate . -/// uu_wc used to depend on that crate. +/// `uu_wc` used to depend on that crate. /// The author archived the repository . -/// They suggested incorporating the source directly into uu_wc . +/// They suggested incorporating the source directly into `uu_wc` . /// #[derive(Debug, Copy, Clone)] @@ -53,9 +53,9 @@ impl Incomplete { &self.buffer[..len] } - /// (consumed_from_input, None): not enough input - /// (consumed_from_input, Some(Err(()))): error bytes in buffer - /// (consumed_from_input, Some(Ok(()))): UTF-8 string in buffer + /// `(consumed_from_input, None)`: not enough input + /// `(consumed_from_input, Some(Err(())))`: error bytes in buffer + /// `(consumed_from_input, Some(Ok(())))`: UTF-8 string in buffer fn try_complete_offsets(&mut self, input: &[u8]) -> (usize, Option>) { let initial_buffer_len = self.buffer_len as usize; let copied_from_input; diff --git a/src/uu/wc/src/wc.rs b/src/uu/wc/src/wc.rs index ad78c5ea3..ec3ed5608 100644 --- a/src/uu/wc/src/wc.rs +++ b/src/uu/wc/src/wc.rs @@ -198,7 +198,7 @@ impl<'a> Inputs<'a> { #[derive(Clone, Copy, Debug)] enum StdinKind { - /// Specified on command-line with "-" (STDIN_REPR) + /// Specified on command-line with "-" ([`STDIN_REPR`]) Explicit, /// Implied by the lack of any arguments Implicit, @@ -234,7 +234,7 @@ impl<'a, T: AsRef + ?Sized> From<&'a T> for Input<'a> { } impl<'a> Input<'a> { - /// Translates Path(Cow::Owned(_)) to Path(Cow::Borrowed(_)). + /// Translates `Path(Cow::Owned(_))` to `Path(Cow::Borrowed(_))`. fn as_borrowed(&'a self) -> Self { match self { Self::Path(p) => Self::Path(Cow::Borrowed(p.borrow())), @@ -271,7 +271,7 @@ impl<'a> Input<'a> { /// When given --files0-from, we may be given a path or stdin. Either may be a stream or /// a regular file. If given a file less than 10 MiB, it will be consumed and turned into - /// a Vec of Input::Paths which can be scanned to determine the widths of the columns that + /// a Vec of [`Input::Path`] which can be scanned to determine the widths of the columns that /// will ultimately be printed. 
fn try_as_files0(&self) -> UResult>>> { match self { @@ -657,11 +657,11 @@ enum CountResult { Failure(io::Error), } -/// If we fail opening a file, we only show the error. If we fail reading the +/// If we fail to open a file, we only show the error. If we fail reading the /// file, we show a count for what we managed to read. /// /// Therefore, the reading implementations always return a total and sometimes -/// return an error: (WordCount, Option). +/// return an error: ([`WordCount`], `Option`). fn word_count_from_input(input: &Input<'_>, settings: &Settings) -> CountResult { let (total, maybe_err) = match input { Input::Stdin(_) => word_count_from_reader(io::stdin().lock(), settings), @@ -734,7 +734,7 @@ fn compute_number_width(inputs: &Inputs, settings: &Settings) -> usize { type InputIterItem<'a> = Result, Box>; -/// To be used with `--files0-from=-`, this applies a filter on the results of files0_iter to +/// To be used with `--files0-from=-`, this applies a filter on the results of [`files0_iter`] to /// translate '-' into the appropriate error. fn files0_iter_stdin<'a>() -> impl Iterator> { files0_iter(io::stdin().lock(), STDIN_REPR.into()).map(|i| match i {