GSK-2617 Missing kwargs in a test make the whole suite fail #1748

Merged
merged 17 commits on Feb 8, 2024
Changes from 14 commits
17 changes: 9 additions & 8 deletions giskard/core/suite.py
@@ -433,12 +433,9 @@ def run(self, verbose: bool = True, **suite_run_args):
         """
         run_args = self.default_params.copy()
         run_args.update(suite_run_args)
+        self.verify_required_params(run_args)

         results: List[SuiteResult] = list()
-        required_params = self.find_required_params()
-        undefined_params = {k: v for k, v in required_params.items() if k not in run_args}
-        if len(undefined_params):
Contributor:
there's the same check in to_unittest that should be consistent with run

Contributor:
why not keep the same check but convert an exception to a warning?

Member Author:
Yes, that's right, a warning makes a lot more sense, especially with live logs now.

raise ValueError(f"Missing {len(undefined_params)} required parameters: {undefined_params}")

         for test_partial in self.tests:
             test_params = self.create_test_params(test_partial, run_args)
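To make the change debated in the thread above concrete: with this patch, calling run() on a suite that is missing a required kwarg no longer raises ValueError; it logs a warning, keeps running, and reports the under-parameterized test as failed in the result. A minimal sketch of the new behavior, assuming the public Suite/test API and a stand-in for the _test_a_greater_b helper used in tests/test_programmatic.py (this snippet is illustrative, not code from this PR):

from giskard import Suite, test


@test()
def _test_a_greater_b(a: int, b: int):
    # Stand-in for the helper defined in tests/test_programmatic.py.
    return a > b


# Before this PR: ValueError("Missing 1 required parameters: {'b': <class 'int'>}")
# After this PR: a warning with the same message, and the run completes.
result = Suite().add_test(_test_a_greater_b(a=2)).run()

assert not result.passed
_, test_result, _ = result.results[0]
assert "1 validation error" in test_result.messages[0].text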
@@ -497,10 +494,7 @@ def to_unittest(self, **suite_gen_args) -> List[TestPartial]:
         run_args.update(suite_gen_args)

         unittests: List[TestPartial] = list()
-        required_params = self.find_required_params()
-        undefined_params = {k: v for k, v in required_params.items() if k not in run_args}
-        if len(undefined_params):
-            raise ValueError(f"Missing {len(undefined_params)} required parameters: {undefined_params}")
+        self.verify_required_params(run_args)

         for test_partial in self.tests:
             test_params = self.create_test_params(test_partial, run_args)
@@ -516,6 +510,13 @@ def to_unittest(self, **suite_gen_args) -> List[TestPartial]:

         return unittests

+    def verify_required_params(self, run_args):
+        required_params = self.find_required_params()
+        undefined_params = {k: v for k, v in required_params.items() if k not in run_args}
+
+        if len(undefined_params):
+            warning(f"Missing {len(undefined_params)} required parameters: {undefined_params}")
+
     @staticmethod
     def create_test_params(test_partial, kwargs) -> TestParams:
         if isinstance(test_partial.giskard_test, GiskardTestMethod):
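Note for readers of the new method above: the warning(...) call's import is not visible in this hunk; presumably it is Python's stdlib logging (an assumption, not shown in the diff). A standalone sketch of the same check, decoupled from the Suite class:

import logging
from typing import Any, Dict


def verify_required_params(required_params: Dict[str, type], run_args: Dict[str, Any]) -> None:
    # Warn about (rather than raise on) required parameters absent from run_args.
    undefined_params = {k: v for k, v in required_params.items() if k not in run_args}
    if undefined_params:
        logging.warning(f"Missing {len(undefined_params)} required parameters: {undefined_params}")


# Example: 'b' was never supplied, so one warning is emitted and execution continues.
verify_required_params({"a": int, "b": int}, {"a": 2})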
32 changes: 26 additions & 6 deletions tests/test_programmatic.py
@@ -46,18 +46,38 @@ def test_a_greater_b_pass():


 def test_missing_arg():
-    with pytest.raises(Exception, match="Missing 1 required parameters: {'b': <class 'int'>}"):
-        Suite().add_test(_test_a_greater_b(a=2)).run()
+    with pytest.warns(match="Missing 1 required parameters: {'b': <class 'int'>}"):
+        result = Suite().add_test(_test_a_greater_b(a=2)).run()
+
+    assert not result.passed
+
+    assert len(result.results) == 1
+    _, test_result, _ = result.results[0]
+    assert "1 validation error" in test_result.messages[0].text


 def test_missing_args():
-    with pytest.raises(Exception, match="Missing 2 required parameters: {'a': <class 'int'>, 'b': <class 'int'>}"):
-        Suite().add_test(_test_a_greater_b()).run()
+    with pytest.warns(match="Missing 2 required parameters: {'a': <class 'int'>, 'b': <class 'int'>}"):
+        result = Suite().add_test(_test_a_greater_b()).run()
+
+    assert not result.passed
+
+    assert len(result.results) == 1
+    _, test_result, _ = result.results[0]
+    assert test_result.is_error
+    assert "2 validation errors" in test_result.messages[0].text


 def test_missing_arg_one_global():
-    with pytest.raises(Exception, match="Missing 1 required parameters: {'b': <class 'int'>}"):
-        Suite().add_test(_test_a_greater_b()).run(a=2)
+    with pytest.warns(match="Missing 1 required parameters: {'b': <class 'int'>}"):
+        result = Suite().add_test(_test_a_greater_b()).run(a=2)
+
+    assert not result.passed
+
+    assert len(result.results) == 1
+    _, test_result, _ = result.results[0]
+    assert test_result.is_error
+    assert "1 validation error" in test_result.messages[0].text, test_result.messages[0].text


 def test_all_global():
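The updated tests rely on a small surface of the run result: result.passed, entries of result.results being 3-tuples whose middle element is the test result, and that result exposing is_error and messages[].text. A hedged helper sketch built only on that surface (the helper itself is hypothetical, not part of this PR):

def collect_error_messages(suite_result) -> list:
    # Gather message texts from errored tests in a suite run.
    # Assumes each entry of suite_result.results is a 3-tuple whose middle
    # element exposes .is_error and .messages, as unpacked in the tests above.
    messages = []
    for _, test_result, _ in suite_result.results:
        if test_result.is_error:
            messages.extend(m.text for m in test_result.messages)
    return messages

# Usage, following test_missing_args above:
#     result = Suite().add_test(_test_a_greater_b()).run()
#     collect_error_messages(result)  # e.g. ["2 validation errors ..."]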
18 changes: 14 additions & 4 deletions tests/test_suite.py
@@ -27,15 +27,25 @@ def test_default_parameters_are_used_at_runtime(german_credit_data, german_credi
     # This will miss dataset
     suite = Suite(default_params=dict(model=german_credit_model))
     suite.add_test(my_test)
-    with pytest.raises(ValueError):
-        suite.run()
+    result = suite.run()
+    assert not result.passed
+    _, test_result, _ = result.results[0]
+    assert test_result.is_error

     # But we can pass dataset at runtime
-    suite.run(dataset=german_credit_data)
+    result = suite.run(dataset=german_credit_data)
+    assert result.passed
+    _, test_result, _ = result.results[0]
+    assert not test_result.is_error
+    assert test_result.passed

     # Or we can provide it in the suite defaults
     suite.default_params["dataset"] = german_credit_data
-    suite.run()
+    result = suite.run()
+    assert result.passed
+    _, test_result, _ = result.results[0]
+    assert not test_result.is_error
+    assert test_result.passed


 def test_runtime_parameters_override_default_parameters(german_credit_data, german_credit_model):
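The three scenarios in this test mirror the parameter-resolution order in Suite.run() from the first hunk: suite defaults are copied, runtime kwargs override them, and anything still missing now only triggers the verify_required_params warning. A tiny illustration of that merge with placeholder strings standing in for the giskard model and dataset fixtures:

default_params = {"model": "german_credit_model_placeholder"}
suite_run_args = {"dataset": "german_credit_data_placeholder"}

run_args = default_params.copy()
run_args.update(suite_run_args)

# Runtime kwargs win; defaults fill in the rest.
assert run_args == {
    "model": "german_credit_model_placeholder",
    "dataset": "german_credit_data_placeholder",
}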