Skip to content

Commit d8e5bbb

Browse files
authored
Merge branch 'master' into matthieucan/lint-macros
2 parents 30ba383 + 98db52a commit d8e5bbb

File tree

4 files changed

+11
-10
lines changed

4 files changed

+11
-10
lines changed

CHANGELOG.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ and this project adheres to
1111
## [0.14.1] - 2025-10-09
1212

1313
- Migrate to `uv` project manager.
14+
- Round score of individual entities when showing summary.
1415
- Support linting dbt macros as a new evaluable entity type.
1516

1617
## [0.14.0] - 2025-08-08

images/dbt-score-output.png

-123 KB
Binary file not shown.

src/dbt_score/formatters/human_readable_formatter.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -97,8 +97,8 @@ def project_evaluated(self, score: Score) -> None:
9797
for evaluable, evaluable_score in self._failed_evaluables:
9898
resource_type = type(evaluable)
9999
print(
100-
f"{resource_type.__name__} "
101-
f"{self.pretty_name(evaluable)} scored {evaluable_score.value}"
100+
f"{resource_type.__name__} {self.pretty_name(evaluable)} "
101+
f"scored {evaluable_score.rounded_value}"
102102
)
103103

104104
elif score.value < self._config.fail_project_under:

tests/formatters/test_human_readable_formatter.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -161,23 +161,23 @@ def test_human_readable_formatter_low_evaluable_score(
161161
results: EvaluableResultsType = {
162162
rule_severity_critical: RuleViolation("Error"),
163163
}
164-
formatter.evaluable_evaluated(model1, results, Score(0.0, "🚧"))
165-
formatter.evaluable_evaluated(source1, results, Score(0.0, "🚧"))
166-
formatter.project_evaluated(Score(0.0, "🚧"))
164+
formatter.evaluable_evaluated(model1, results, Score(4.99, "🚧"))
165+
formatter.evaluable_evaluated(source1, results, Score(3.33, "🚧"))
166+
formatter.project_evaluated(Score(4.16, "🚧"))
167167
stdout = capsys.readouterr().out
168168

169169
expected = """\
170-
🚧 \x1b[1mModel: model1\x1b[0m (score: 0.0)
170+
🚧 \x1b[1mModel: model1\x1b[0m (score: 4.9)
171171
\x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error
172172
173-
🚧 \x1b[1mSource: my_source.table1\x1b[0m (score: 0.0)
173+
🚧 \x1b[1mSource: my_source.table1\x1b[0m (score: 3.3)
174174
\x1b[1;33mWARN\x1b[0m (critical) tests.conftest.rule_severity_critical: Error
175175
176-
Project score: \x1b[1m0.0\x1b[0m 🚧
176+
Project score: \x1b[1m4.1\x1b[0m 🚧
177177
178178
Error: evaluable score too low, fail_any_item_under = 5.0
179-
Model model1 scored 0.0
180-
Source my_source.table1 scored 0.0
179+
Model model1 scored 4.9
180+
Source my_source.table1 scored 3.3
181181
"""
182182
assert stdout == dedent(expected)
183183

0 commit comments

Comments (0)