1 change: 1 addition & 0 deletions .github/workflows/release.yml
@@ -30,6 +30,7 @@ jobs:
- name: Deploy docs
run: |
uv run dbt-score list -f markdown -n dbt_score.rules.generic --title Generic > docs/rules/generic.md
uv run dbt-score list -f markdown -n dbt_score.rules.macros --title Macros > docs/rules/macros.md
uv run mkdocs gh-deploy --force
- uses: ncipollo/release-action@v1
with:
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -11,6 +11,7 @@ and this project adheres to
## [0.14.1] - 2025-10-09

- Migrate to `uv` project manager.
- Support linting dbt macros as a new evaluable entity type.

## [0.14.0] - 2025-08-08

2 changes: 1 addition & 1 deletion docs/create_rules.md
@@ -32,7 +32,7 @@ function is its description. Therefore, it is important to use a
self-explanatory name for the function and document it well.

The type annotation for the rule's argument dictates whether the rule should be
applied to dbt models, sources, snapshots, seeds, or exposures.
applied to dbt models, sources, snapshots, seeds, exposures, or macros.
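
For instance, a rule annotated with the new `Macro` type might look like the following sketch (the rule name and check are illustrative, not part of this PR):

```python
from dbt_score import Macro, RuleViolation, rule


@rule
def macro_sql_is_not_empty(macro: Macro) -> RuleViolation | None:
    """A macro should have a non-empty body."""
    # Annotating the argument as `Macro` is what makes dbt-score
    # apply this rule to macros rather than models or sources.
    if not macro.macro_sql.strip():
        return RuleViolation(message="Macro has an empty body.")
```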

Here is the same example rule, applied to sources:

1 change: 1 addition & 0 deletions docs/index.md
@@ -16,6 +16,7 @@ encourage) good practices. The dbt entities that `dbt-score` is able to lint
- Snapshots
- Exposures
- Seeds
- Macros

## Example

1 change: 1 addition & 0 deletions docs/rules/macros.md
@@ -0,0 +1 @@
(content generated in CI)
1 change: 1 addition & 0 deletions mkdocs.yml
@@ -28,6 +28,7 @@ nav:
- Programmatic invocations: programmatic_invocations.md
- Rules:
- rules/generic.md
- rules/macros.md
- rules/filters.md
- Reference:
- reference/cli.md
3 changes: 2 additions & 1 deletion src/dbt_score/__init__.py
@@ -1,11 +1,12 @@
"""Init dbt_score package."""

from dbt_score.models import Exposure, Model, Seed, Snapshot, Source
from dbt_score.models import Exposure, Macro, Model, Seed, Snapshot, Source
from dbt_score.rule import Rule, RuleViolation, Severity, rule
from dbt_score.rule_filter import RuleFilter, rule_filter

__all__ = [
"Exposure",
"Macro",
"Model",
"Source",
"Snapshot",
2 changes: 2 additions & 0 deletions src/dbt_score/evaluation.py
@@ -66,6 +66,7 @@ def evaluate(self) -> None:
self._manifest_loader.snapshots.values(),
self._manifest_loader.exposures.values(),
self._manifest_loader.seeds.values(),
self._manifest_loader.macros.values(),
):
# type inference on elements from `chain` is wonky
# and resolves to superclass HasColumnsMixin
@@ -101,5 +102,6 @@ def evaluate(self) -> None:
or self._manifest_loader.snapshots
or self._manifest_loader.exposures
or self._manifest_loader.seeds
or self._manifest_loader.macros
):
self._formatter.project_evaluated(self.project_score)
4 changes: 3 additions & 1 deletion src/dbt_score/formatters/human_readable_formatter.py
@@ -4,7 +4,7 @@

from dbt_score.evaluation import EvaluableResultsType
from dbt_score.formatters import Formatter
from dbt_score.models import Evaluable, Exposure, Model, Seed, Snapshot, Source
from dbt_score.models import Evaluable, Exposure, Macro, Model, Seed, Snapshot, Source
from dbt_score.rule import RuleViolation
from dbt_score.scoring import Score

@@ -41,6 +41,8 @@ def pretty_name(evaluable: Evaluable) -> str:
return evaluable.name
case Seed():
return evaluable.name
case Macro():
return evaluable.name
case _:
raise NotImplementedError

71 changes: 70 additions & 1 deletion src/dbt_score/models.py
@@ -645,7 +645,60 @@ def __hash__(self) -> int:
return hash(self.unique_id)


Evaluable: TypeAlias = Model | Source | Snapshot | Seed | Exposure
@dataclass
class Macro:
"""Represents a dbt macro.

Attributes:
unique_id: The unique id of the macro (e.g. `macro.package.macro_name`).
name: The name of the macro.
description: The description of the macro.
original_file_path: The path to the macro file
(e.g. `macros/my_macro.sql`).
package_name: The name of the package this macro belongs to.
macro_sql: The SQL code of the macro.
meta: The metadata attached to the macro.
tags: The list of tags attached to the macro.
depends_on: The dependencies of the macro (the other macros it calls).
arguments: The list of arguments the macro accepts.
_raw_values: The raw values of the macro in the manifest.
"""

unique_id: str
name: str
description: str
original_file_path: str
package_name: str
macro_sql: str
meta: dict[str, Any]
tags: list[str]
depends_on: dict[str, list[str]] = field(default_factory=dict)
arguments: list[dict[str, Any]] = field(default_factory=list)
_raw_values: dict[str, Any] = field(default_factory=dict)

@classmethod
def from_node(cls, node_values: dict[str, Any]) -> "Macro":
"""Create a macro object from a node in the manifest."""
return cls(
unique_id=node_values["unique_id"],
name=node_values["name"],
description=node_values.get("description", ""),
original_file_path=node_values["original_file_path"],
package_name=node_values["package_name"],
macro_sql=node_values["macro_sql"],
meta=node_values.get("meta", {}),
tags=node_values.get("tags", []),
depends_on=node_values.get("depends_on", {}),
arguments=node_values.get("arguments", []),
_raw_values=node_values,
)

def __hash__(self) -> int:
"""Compute a unique hash for a macro."""
return hash(self.unique_id)


Evaluable: TypeAlias = Model | Source | Snapshot | Seed | Exposure | Macro


class ManifestLoader:
@@ -677,20 +730,27 @@ def __init__(self, file_path: Path, select: Iterable[str] | None = None):
).items()
if exposure_values["package_name"] == self.project_name
}
self.raw_macros = {
macro_id: macro_values
for macro_id, macro_values in self.raw_manifest.get("macros", {}).items()
if macro_values["package_name"] == self.project_name
}

self.models: dict[str, Model] = {}
self.tests: dict[str, list[dict[str, Any]]] = defaultdict(list)
self.sources: dict[str, Source] = {}
self.snapshots: dict[str, Snapshot] = {}
self.exposures: dict[str, Exposure] = {}
self.seeds: dict[str, Seed] = {}
self.macros: dict[str, Macro] = {}

self._reindex_tests()
self._load_models()
self._load_sources()
self._load_snapshots()
self._load_exposures()
self._load_seeds()
self._load_macros()
self._populate_relatives()

if select:
@@ -702,6 +762,7 @@ def __init__(self, file_path: Path, select: Iterable[str] | None = None):
+ len(self.snapshots)
+ len(self.seeds)
+ len(self.exposures)
+ len(self.macros)
) == 0:
logger.warning("Nothing to evaluate!")

@@ -740,6 +801,13 @@ def _load_seeds(self) -> None:
seed = Seed.from_node(node_values, self.tests.get(node_id, []))
self.seeds[node_id] = seed

def _load_macros(self) -> None:
"""Load the macros from the manifest."""
for macro_id, macro_values in self.raw_macros.items():
if macro_values.get("resource_type") == "macro":
macro = Macro.from_node(macro_values)
self.macros[macro_id] = macro

def _reindex_tests(self) -> None:
"""Index tests based on their associated evaluable."""
for node_values in self.raw_nodes.values():
@@ -796,3 +864,4 @@ def _filter_evaluables(self, select: Iterable[str]) -> None:
self.snapshots = {k: s for k, s in self.snapshots.items() if s.name in selected}
self.exposures = {k: e for k, e in self.exposures.items() if e.name in selected}
self.seeds = {k: s for k, s in self.seeds.items() if s.name in selected}
self.macros = {k: m for k, m in self.macros.items() if m.name in selected}
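
To make the new loader path concrete, here is a minimal sketch of `Macro.from_node` in isolation; the node dict is a hand-written approximation of one entry under `macros` in `manifest.json`, not real manifest output:

```python
from dbt_score.models import Macro

# Hand-written approximation of a manifest "macros" entry.
node = {
    "unique_id": "macro.my_project.cents_to_dollars",
    "name": "cents_to_dollars",
    "description": "Convert an amount in cents to dollars.",
    "original_file_path": "macros/cents_to_dollars.sql",
    "package_name": "my_project",
    "macro_sql": "{% macro cents_to_dollars(col) %}{{ col }} / 100{% endmacro %}",
    "resource_type": "macro",
}

macro = Macro.from_node(node)
assert macro.name == "cents_to_dollars"
# Optional keys fall back to the dataclass defaults.
assert macro.tags == [] and macro.arguments == [] and macro.meta == {}
```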
9 changes: 8 additions & 1 deletion src/dbt_score/rule.py
@@ -14,7 +14,7 @@
overload,
)

from dbt_score.models import Evaluable, Exposure, Model, Seed, Snapshot, Source
from dbt_score.models import Evaluable, Exposure, Macro, Model, Seed, Snapshot, Source
from dbt_score.more_itertools import first_true
from dbt_score.rule_filter import RuleFilter

@@ -68,12 +68,14 @@ class RuleViolation:
SnapshotRuleEvaluationType: TypeAlias = Callable[[Snapshot], RuleViolation | None]
ExposureRuleEvaluationType: TypeAlias = Callable[[Exposure], RuleViolation | None]
SeedRuleEvaluationType: TypeAlias = Callable[[Seed], RuleViolation | None]
MacroRuleEvaluationType: TypeAlias = Callable[[Macro], RuleViolation | None]
RuleEvaluationType: TypeAlias = (
ModelRuleEvaluationType
| SourceRuleEvaluationType
| SnapshotRuleEvaluationType
| ExposureRuleEvaluationType
| SeedRuleEvaluationType
| MacroRuleEvaluationType
)


@@ -223,6 +225,11 @@ def rule(__func: SeedRuleEvaluationType) -> Type[Rule]:
...


@overload
def rule(__func: MacroRuleEvaluationType) -> Type[Rule]:
...


@overload
def rule(
*,
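
With the new overload, `@rule` accepts macro-typed functions in both the bare and the configured form. A sketch of the configured form (assuming the decorator's existing `severity` keyword, which is not shown in this diff):

```python
from dbt_score import Macro, RuleViolation, Severity, rule


@rule(severity=Severity.LOW)
def macro_has_owner(macro: Macro) -> RuleViolation | None:
    """A macro should declare an owner in its meta."""
    if "owner" not in macro.meta:
        return RuleViolation(message="Macro has no owner in `meta`.")
```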
9 changes: 8 additions & 1 deletion src/dbt_score/rule_filter.py
@@ -4,20 +4,22 @@
import typing
from typing import Any, Callable, Type, TypeAlias, cast, overload

from dbt_score.models import Evaluable, Exposure, Model, Seed, Snapshot, Source
from dbt_score.models import Evaluable, Exposure, Macro, Model, Seed, Snapshot, Source
from dbt_score.more_itertools import first_true

ModelFilterEvaluationType: TypeAlias = Callable[[Model], bool]
SourceFilterEvaluationType: TypeAlias = Callable[[Source], bool]
SnapshotFilterEvaluationType: TypeAlias = Callable[[Snapshot], bool]
ExposureFilterEvaluationType: TypeAlias = Callable[[Exposure], bool]
SeedRuleEvaluationType: TypeAlias = Callable[[Seed], bool]
MacroFilterEvaluationType: TypeAlias = Callable[[Macro], bool]
FilterEvaluationType: TypeAlias = (
ModelFilterEvaluationType
| SourceFilterEvaluationType
| SnapshotFilterEvaluationType
| ExposureFilterEvaluationType
| SeedRuleEvaluationType
| MacroFilterEvaluationType
)


@@ -101,6 +103,11 @@ def rule_filter(__func: SeedRuleEvaluationType) -> Type[RuleFilter]:
...


@overload
def rule_filter(__func: MacroFilterEvaluationType) -> Type[RuleFilter]:
...


@overload
def rule_filter(
*,
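
The matching filter overload means macros can be scoped like any other evaluable. An illustrative sketch (the naming convention it encodes is hypothetical):

```python
from dbt_score import Macro, rule_filter


@rule_filter
def exclude_private_macros(macro: Macro) -> bool:
    """Skip macros that are private by naming convention."""
    # Rules carrying this filter skip any macro for which this returns False.
    return not macro.name.startswith("_")
```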
53 changes: 53 additions & 0 deletions src/dbt_score/rules/macros.py
@@ -0,0 +1,53 @@
"""Rules for dbt macros."""

import re

from dbt_score import Macro, RuleViolation, rule


@rule
def macro_has_description(macro: Macro) -> RuleViolation | None:
"""A macro should have a description.

Macros are reusable code that should be well-documented so other developers
can understand their purpose and usage.
"""
if not macro.description:
return RuleViolation(message="Macro lacks a description.")


@rule
def macro_arguments_have_description(macro: Macro) -> RuleViolation | None:
"""All macro arguments should have a description.

As of dbt Core v1.10, macro arguments can be documented. This helps users
understand what parameters the macro expects and how to use them correctly.
"""
if not macro.arguments:
return None

invalid_args = [
arg.get("name", "unknown")
for arg in macro.arguments
if not arg.get("description")
]
if invalid_args:
max_length = 60
message = f"Arguments lack a description: {', '.join(invalid_args)}."
if len(message) > max_length:
message = f"{message[:max_length]}…"
return RuleViolation(message=message)


@rule
def macro_name_follows_naming_convention(macro: Macro) -> RuleViolation | None:
"""A macro name should use snake_case naming convention.

Consistent naming conventions improve code readability and maintainability.
Macro names should use lowercase letters with underscores.
"""
# Check if name follows snake_case: lowercase letters, numbers, and underscores only
if not re.match(r"^[a-z0-9_]+$", macro.name):
return RuleViolation(
message="Macro name should use snake_case (lowercase with underscores)."
)
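
A quick way to exercise one of these rules outside a full run, assuming (as elsewhere in dbt-score) that `@rule` produces a `Rule` class that can be instantiated without arguments and exposes an `evaluate` method:

```python
from dbt_score.models import Macro
from dbt_score.rules.macros import macro_name_follows_naming_convention

macro = Macro(
    unique_id="macro.my_project.CamelCaseMacro",
    name="CamelCaseMacro",
    description="",
    original_file_path="macros/camel_case_macro.sql",
    package_name="my_project",
    macro_sql="{% macro CamelCaseMacro() %}{% endmacro %}",
    meta={},
    tags=[],
)

# Expect a RuleViolation complaining about snake_case naming.
print(macro_name_follows_naming_convention().evaluate(macro))
```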