Skip to content

Commit

Permalink
refactor: Rename RuleOrigin to Linter
Browse files Browse the repository at this point in the history
The term "origin" was accurate, since Ruff's rules are currently always
modeled after one origin (except for the Ruff-specific rules).

However, since we want to introduce a many-to-many mapping between codes
and rules, the term "origin" no longer makes much sense. Rules usually
don't have multiple origins; rather, one linter implements a rule first and
then others implement it later (often inspired by another linter).
But we don't actually care much about where a rule originates from when
mapping multiple rule codes to one rule implementation, so renaming
RuleOrigin to Linter is less confusing with the many-to-many system.
  • Loading branch information
not-my-profile authored and charliermarsh committed Jan 21, 2023
1 parent babe1eb commit 7fc42f8
Show file tree
Hide file tree
Showing 12 changed files with 95 additions and 95 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ jobs:
profile: minimal
override: true
- uses: Swatinem/rust-cache@v1
- run: ./scripts/add_rule.py --name DoTheThing --code PLC999 --origin pylint
- run: ./scripts/add_rule.py --name DoTheThing --code PLC999 --linter pylint
- run: cargo check
- run: ./scripts/add_plugin.py test --url https://pypi.org/project/-test/0.1.0/
- run: cargo check
Expand Down
6 changes: 3 additions & 3 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ At a high level, the steps involved in adding a new lint rule are as follows:
6. Update the generated files (documentation and generated code).

To define the violation, start by creating a dedicated file for your rule under the appropriate
rule origin (e.g., `src/rules/flake8_bugbear/rules/abstract_base_class.rs`). That file should
rule linter (e.g., `src/rules/flake8_bugbear/rules/abstract_base_class.rs`). That file should
contain a struct defined via `define_violation!`, along with a function that creates the violation
based on any required inputs. (Many of the existing examples live in `src/violations.rs`, but we're
looking to place new rules in their own files.)
Expand All @@ -81,7 +81,7 @@ collecting diagnostics as it goes.
If you need to inspect the AST, you can run `cargo +nightly dev print-ast` with a Python file. Grep
for the `Check::new` invocations to understand how other, similar rules are implemented.

To add a test fixture, create a file under `resources/test/fixtures/[origin]`, named to match
To add a test fixture, create a file under `resources/test/fixtures/[linter]`, named to match
the code you defined earlier (e.g., `resources/test/fixtures/pycodestyle/E402.py`). This file should
contain a variety of violations and non-violations designed to evaluate and demonstrate the behavior
of your lint rule.
Expand All @@ -90,7 +90,7 @@ Run `cargo +nightly dev generate-all` to generate the code for your new fixture.
locally with (e.g.) `cargo run resources/test/fixtures/pycodestyle/E402.py --no-cache --select E402`.

Once you're satisfied with the output, codify the behavior as a snapshot test by adding a new
`test_case` macro in the relevant `src/[origin]/mod.rs` file. Then, run `cargo test --all`.
`test_case` macro in the relevant `src/[linter]/mod.rs` file. Then, run `cargo test --all`.
Your test will fail, but you'll be prompted to follow-up with `cargo insta review`. Accept the
generated snapshot, then commit the snapshot file alongside the rest of your changes.

Expand Down
18 changes: 9 additions & 9 deletions build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ use std::path::{Path, PathBuf};

fn main() {
let out_dir = PathBuf::from(std::env::var_os("OUT_DIR").unwrap());
generate_origin_name_and_url(&out_dir);
generate_linter_name_and_url(&out_dir);
}

const RULES_SUBMODULE_DOC_PREFIX: &str = "//! Rules from ";
Expand All @@ -15,13 +15,13 @@ const RULES_SUBMODULE_DOC_PREFIX: &str = "//! Rules from ";
/// //! Rules from [Pyflakes](https://pypi.org/project/pyflakes/2.5.0/).
///
/// This function extracts the link label and url from these comments and
/// generates the `name` and `url` functions for the `RuleOrigin` enum
/// generates the `name` and `url` functions for the `Linter` enum
/// accordingly, so that they can be used by `ruff_dev::generate_rules_table`.
fn generate_origin_name_and_url(out_dir: &Path) {
fn generate_linter_name_and_url(out_dir: &Path) {
println!("cargo:rerun-if-changed=src/rules/");

let mut name_match_arms: String = r#"RuleOrigin::Ruff => "Ruff-specific rules","#.into();
let mut url_match_arms: String = r#"RuleOrigin::Ruff => None,"#.into();
let mut name_match_arms: String = r#"Linter::Ruff => "Ruff-specific rules","#.into();
let mut url_match_arms: String = r#"Linter::Ruff => None,"#.into();

for file in fs::read_dir("src/rules/")
.unwrap()
Expand Down Expand Up @@ -62,14 +62,14 @@ fn generate_origin_name_and_url(out_dir: &Path) {
})
.collect::<String>();

name_match_arms.push_str(&format!(r#"RuleOrigin::{variant_name} => "{name}","#));
url_match_arms.push_str(&format!(r#"RuleOrigin::{variant_name} => Some("{url}"),"#));
name_match_arms.push_str(&format!(r#"Linter::{variant_name} => "{name}","#));
url_match_arms.push_str(&format!(r#"Linter::{variant_name} => Some("{url}"),"#));
}

write!(
BufWriter::new(fs::File::create(out_dir.join("origin.rs")).unwrap()),
BufWriter::new(fs::File::create(out_dir.join("linter.rs")).unwrap()),
"
impl RuleOrigin {{
impl Linter {{
pub fn name(&self) -> &'static str {{
match self {{ {name_match_arms} }}
}}
Expand Down
4 changes: 2 additions & 2 deletions ruff_cli/src/commands.rs
Original file line number Diff line number Diff line change
Expand Up @@ -285,7 +285,7 @@ pub fn show_files(
#[derive(Serialize)]
struct Explanation<'a> {
code: &'a str,
origin: &'a str,
linter: &'a str,
summary: &'a str,
}

Expand Down Expand Up @@ -315,7 +315,7 @@ pub fn explain(rule: &Rule, format: SerializationFormat) -> Result<()> {
"{}",
serde_json::to_string_pretty(&Explanation {
code: rule.code(),
origin: rule.origin().name(),
linter: rule.origin().name(),
summary: rule.message_formats()[0],
})?
);
Expand Down
8 changes: 4 additions & 4 deletions ruff_cli/tests/black_compatibility_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ use assert_cmd::Command;
use itertools::Itertools;
use log::info;
use ruff::logging::{set_up_logging, LogLevel};
use ruff::registry::RuleOrigin;
use ruff::registry::Linter;
use strum::IntoEnumIterator;
use walkdir::WalkDir;

Expand Down Expand Up @@ -175,12 +175,12 @@ fn test_ruff_black_compatibility() -> Result<()> {
.filter_map(Result::ok)
.collect();

let codes = RuleOrigin::iter()
let codes = Linter::iter()
// Exclude ruff codes, specifically RUF100, because it causes differences that are not a
// problem. Ruff would add a `# noqa: W292` after the first run, black introduces a
// newline, and ruff removes the `# noqa: W292` again.
.filter(|origin| *origin != RuleOrigin::Ruff)
.map(|origin| origin.prefixes().as_list(","))
.filter(|linter| *linter != Linter::Ruff)
.map(|linter| linter.prefixes().as_list(","))
.join(",");
let ruff_args = [
"-",
Expand Down
18 changes: 9 additions & 9 deletions ruff_dev/src/generate_rules_table.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

use anyhow::Result;
use clap::Args;
use ruff::registry::{Prefixes, RuleCodePrefix, RuleOrigin};
use ruff::registry::{Linter, Prefixes, RuleCodePrefix};
use strum::IntoEnumIterator;

use crate::utils::replace_readme_section;
Expand Down Expand Up @@ -47,38 +47,38 @@ pub fn main(cli: &Cli) -> Result<()> {
// Generate the table string.
let mut table_out = String::new();
let mut toc_out = String::new();
for origin in RuleOrigin::iter() {
let prefixes = origin.prefixes();
for linter in Linter::iter() {
let prefixes = linter.prefixes();
let codes_csv: String = prefixes.as_list(", ");
table_out.push_str(&format!("### {} ({codes_csv})", origin.name()));
table_out.push_str(&format!("### {} ({codes_csv})", linter.name()));
table_out.push('\n');
table_out.push('\n');

toc_out.push_str(&format!(
" 1. [{} ({})](#{}-{})\n",
origin.name(),
linter.name(),
codes_csv,
origin.name().to_lowercase().replace(' ', "-"),
linter.name().to_lowercase().replace(' ', "-"),
codes_csv.to_lowercase().replace(',', "-").replace(' ', "")
));

if let Some(url) = origin.url() {
if let Some(url) = linter.url() {
let host = url
.trim_start_matches("https://")
.split('/')
.next()
.unwrap();
table_out.push_str(&format!(
"For more, see [{}]({}) on {}.",
origin.name(),
linter.name(),
url,
match host {
"pypi.org" => "PyPI",
"github.com" => "GitHub",
host => panic!(
"unexpected host in URL of {}, expected pypi.org or github.com but found \
{host}",
origin.name()
linter.name()
),
}
));
Expand Down
14 changes: 7 additions & 7 deletions ruff_macros/src/define_rule_mapping.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ pub fn define_rule_mapping(mapping: &Mapping) -> proc_macro2::TokenStream {
.extend(quote! {Self::#name => <#path as Violation>::message_formats(),});
rule_autofixable_match_arms.extend(quote! {Self::#name => <#path as Violation>::AUTOFIX,});
let origin = get_origin(code);
rule_origin_match_arms.extend(quote! {Self::#name => RuleOrigin::#origin,});
rule_origin_match_arms.extend(quote! {Self::#name => Linter::#origin,});
rule_code_match_arms.extend(quote! {Self::#name => #code_str,});
rule_from_code_match_arms.extend(quote! {#code_str => Ok(&Rule::#name), });
diagkind_code_match_arms.extend(quote! {Self::#name(..) => &Rule::#name, });
Expand Down Expand Up @@ -95,7 +95,7 @@ pub fn define_rule_mapping(mapping: &Mapping) -> proc_macro2::TokenStream {
match self { #rule_autofixable_match_arms }
}

pub fn origin(&self) -> RuleOrigin {
pub fn origin(&self) -> Linter {
match self { #rule_origin_match_arms }
}

Expand Down Expand Up @@ -142,16 +142,16 @@ pub fn define_rule_mapping(mapping: &Mapping) -> proc_macro2::TokenStream {

fn get_origin(ident: &Ident) -> Ident {
let ident = ident.to_string();
let mut iter = crate::prefixes::PREFIX_TO_ORIGIN.iter();
let origin = loop {
let (prefix, origin) = iter
let mut iter = crate::prefixes::PREFIX_TO_LINTER.iter();
let linter = loop {
let (prefix, linter) = iter
.next()
.unwrap_or_else(|| panic!("code doesn't start with any recognized prefix: {ident}"));
if ident.starts_with(prefix) {
break origin;
break linter;
}
};
Ident::new(origin, Span::call_site())
Ident::new(linter, Span::call_site())
}
pub struct Mapping {
entries: Vec<(Ident, Path, Ident)>,
Expand Down
10 changes: 5 additions & 5 deletions ruff_macros/src/prefixes.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
// Longer prefixes should come first so that you can find an origin for a code
// Longer prefixes should come first so that you can find a linter for a code
// by simply picking the first entry that starts with the given prefix.

pub const PREFIX_TO_ORIGIN: &[(&str, &str)] = &[
pub const PREFIX_TO_LINTER: &[(&str, &str)] = &[
("ANN", "Flake8Annotations"),
("ARG", "Flake8UnusedArguments"),
("A", "Flake8Builtins"),
Expand Down Expand Up @@ -41,12 +41,12 @@ pub const PREFIX_TO_ORIGIN: &[(&str, &str)] = &[

#[cfg(test)]
mod tests {
use super::PREFIX_TO_ORIGIN;
use super::PREFIX_TO_LINTER;

#[test]
fn order() {
for (idx, (prefix, _)) in PREFIX_TO_ORIGIN.iter().enumerate() {
for (prior_prefix, _) in PREFIX_TO_ORIGIN[..idx].iter() {
for (idx, (prefix, _)) in PREFIX_TO_LINTER.iter().enumerate() {
for (prior_prefix, _) in PREFIX_TO_LINTER[..idx].iter() {
assert!(!prefix.starts_with(prior_prefix));
}
}
Expand Down
8 changes: 4 additions & 4 deletions scripts/_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,13 @@
ROOT_DIR = Path(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


def dir_name(origin: str) -> str:
return origin.replace("-", "_")
def dir_name(linter_name: str) -> str:
return linter_name.replace("-", "_")


def pascal_case(origin: str) -> str:
def pascal_case(linter_name: str) -> str:
"""Convert from snake-case to PascalCase."""
return "".join(word.title() for word in origin.split("-"))
return "".join(word.title() for word in linter_name.split("-"))


def get_indent(line: str) -> str:
Expand Down
4 changes: 2 additions & 2 deletions scripts/add_plugin.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,10 +78,10 @@ def main(*, plugin: str, url: str) -> None:
fp.write(f"{indent}{pascal_case(plugin)},")
fp.write("\n")

elif line.strip() == "RuleOrigin::Ruff => Prefixes::Single(RuleCodePrefix::RUF),":
elif line.strip() == "Linter::Ruff => Prefixes::Single(RuleCodePrefix::RUF),":
prefix = 'todo!("Fill-in prefix after generating codes")'
fp.write(
f"{indent}RuleOrigin::{pascal_case(plugin)} => Prefixes::Single({prefix}),"
f"{indent}Linter::{pascal_case(plugin)} => Prefixes::Single({prefix}),"
)
fp.write("\n")

Expand Down
20 changes: 10 additions & 10 deletions scripts/add_rule.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
python scripts/add_rule.py \
--name PreferListBuiltin \
--code PIE807 \
--origin flake8-pie
--linter flake8-pie
"""

import argparse
Expand All @@ -19,16 +19,16 @@ def snake_case(name: str) -> str:
return "".join(f"_{word.lower()}" if word.isupper() else word for word in name).lstrip("_")


def main(*, name: str, code: str, origin: str) -> None:
def main(*, name: str, code: str, linter: str) -> None:
# Create a test fixture.
with open(
ROOT_DIR / "resources/test/fixtures" / dir_name(origin) / f"{code}.py",
ROOT_DIR / "resources/test/fixtures" / dir_name(linter) / f"{code}.py",
"a",
):
pass

# Add the relevant `#testcase` macro.
mod_rs = ROOT_DIR / "src/rules" / dir_name(origin) / "mod.rs"
mod_rs = ROOT_DIR / "src/rules" / dir_name(linter) / "mod.rs"
content = mod_rs.read_text()

with open(mod_rs, "w") as fp:
Expand All @@ -42,7 +42,7 @@ def main(*, name: str, code: str, origin: str) -> None:
fp.write("\n")

# Add the relevant rule function.
with open(ROOT_DIR / "src/rules" / dir_name(origin) / (snake_case(name) + ".rs"), "w") as fp:
with open(ROOT_DIR / "src/rules" / dir_name(linter) / (snake_case(name) + ".rs"), "w") as fp:
fp.write(
f"""
/// {code}
Expand All @@ -59,7 +59,7 @@ def main(*, name: str, code: str, origin: str) -> None:
fp.write(line)
fp.write("\n")

if line.startswith(f"// {origin}"):
if line.startswith(f"// {linter}"):
fp.write(
"""define_violation!(
pub struct %s;
Expand Down Expand Up @@ -96,7 +96,7 @@ def main(*, name: str, code: str, origin: str) -> None:
if not seen_macro:
continue

if line.strip() == f"// {origin}":
if line.strip() == f"// {linter}":
indent = get_indent(line)
fp.write(f"{indent}{code} => violations::{name},")
fp.write("\n")
Expand All @@ -108,7 +108,7 @@ def main(*, name: str, code: str, origin: str) -> None:
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate boilerplate for a new rule.",
epilog="python scripts/add_rule.py --name PreferListBuiltin --code PIE807 --origin flake8-pie",
epilog="python scripts/add_rule.py --name PreferListBuiltin --code PIE807 --linter flake8-pie",
)
parser.add_argument(
"--name",
Expand All @@ -123,11 +123,11 @@ def main(*, name: str, code: str, origin: str) -> None:
help="The code of the check to generate (e.g., 'A001').",
)
parser.add_argument(
"--origin",
"--linter",
type=str,
required=True,
help="The source with which the check originated (e.g., 'flake8-builtins').",
)
args = parser.parse_args()

main(name=args.name, code=args.code, origin=args.origin)
main(name=args.name, code=args.code, linter=args.linter)
Loading

0 comments on commit 7fc42f8

Please sign in to comment.