
Commit d6259ea

Add evaluable type in the JSON output (#104)
1 parent 3e8a7aa

2 files changed: +47 -7 lines changed

src/dbt_score/formatters/json_formatter.py

Lines changed: 19 additions & 5 deletions
@@ -20,7 +20,8 @@
                     "severity": "medium",
                     "message": "Model lacks a description."
                 }
-            ]
+            },
+            "type": "model"
         },
         "model_bar": {
             "score": 0.0,
@@ -31,19 +32,31 @@
                     "result": "ERR",
                     "message": "Exception message"
                 }
-            }
+            },
+            "type": "model"
+        },
+        "source_baz": {
+            "score": 10.0,
+            "badge": "🥇",
+            "pass": false,
+            "results": {
+                "rule1": {
+                    "result": "ERR",
+                    "message": "Exception message"
+                }
+            },
+            "type": "source"
         }
     },
     "project": {
-        "score": 2.5,
-        "badge": "🥉",
+        "score": 5.0,
+        "badge": "🥈",
         "pass": false
     }
 }
 ```
 """
 
-
 import json
 from typing import Any
 
@@ -72,6 +85,7 @@ def evaluable_evaluated(
             "badge": score.badge,
             "pass": score.value >= self._config.fail_any_item_under,
             "results": {},
+            "type": type(evaluable).__name__.lower(),
         }
         for rule, result in results.items():
            severity = rule.severity.name.lower()

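The one-line implementation change derives the new `"type"` value from the runtime class of the evaluable. A minimal sketch of that derivation, with stand-in `Model` and `Source` classes (the class names are inferred from the `"model"`/`"source"` strings in the docstring; the real classes live elsewhere in dbt-score):

```python
# Stand-ins for dbt-score's evaluable classes; only the class names matter here.
class Model:
    pass


class Source:
    pass


for evaluable in (Model(), Source()):
    # Same expression as in evaluable_evaluated above.
    print(type(evaluable).__name__.lower())  # -> "model", then "source"
```
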
tests/formatters/test_json_formatter.py

Lines changed: 28 additions & 2 deletions
@@ -12,18 +12,20 @@ def test_json_formatter(
     default_config,
     manifest_loader,
     model1,
+    source1,
     rule_severity_low,
     rule_severity_medium,
     rule_severity_critical,
 ):
-    """Ensure the formatter has the correct output after model evaluation."""
+    """Ensure the formatter has the correct output after evaluation."""
     formatter = JSONFormatter(manifest_loader=manifest_loader, config=default_config)
     results: dict[Type[Rule], RuleViolation | Exception | None] = {
         rule_severity_low: None,
         rule_severity_medium: Exception("Oh noes"),
         rule_severity_critical: RuleViolation("Error"),
     }
     formatter.evaluable_evaluated(model1, results, Score(10.0, "🥇"))
+    formatter.evaluable_evaluated(source1, results, Score(10.0, "🥇"))
     formatter.project_evaluated(Score(10.0, "🥇"))
     stdout = capsys.readouterr().out
     assert (
@@ -50,7 +52,31 @@ def test_json_formatter(
                     "severity": "critical",
                     "message": "Error"
                 }
-            }
+            },
+            "type": "model"
+        },
+        "table1": {
+            "score": 10.0,
+            "badge": "🥇",
+            "pass": true,
+            "results": {
+                "tests.conftest.rule_severity_low": {
+                    "result": "OK",
+                    "severity": "low",
+                    "message": null
+                },
+                "tests.conftest.rule_severity_medium": {
+                    "result": "ERR",
+                    "severity": "medium",
+                    "message": "Oh noes"
+                },
+                "tests.conftest.rule_severity_critical": {
+                    "result": "WARN",
+                    "severity": "critical",
+                    "message": "Error"
+                }
+            },
+            "type": "source"
         }
     },
     "project": {

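Downstream consumers can key off the new field instead of inferring kinds from entry names. A hedged sketch of such a consumer; the top-level `"evaluables"` key is an assumption, since the key that wraps the per-evaluable entries is not visible in these hunks:

```python
import json

# A tiny sample in the documented shape; the "evaluables" wrapper key is assumed.
stdout = """
{
    "evaluables": {
        "table1": {"score": 10.0, "badge": "🥇", "pass": true,
                   "results": {}, "type": "source"}
    },
    "project": {"score": 10.0, "badge": "🥇", "pass": true}
}
"""

report = json.loads(stdout)

# Group evaluable names by the new "type" field.
by_type: dict[str, list[str]] = {}
for name, entry in report["evaluables"].items():
    by_type.setdefault(entry["type"], []).append(name)

print(by_type)  # {'source': ['table1']}
```
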