- Notifications
You must be signed in to change notification settings - Fork675
test: add more tests for RequiredOptional (#2045)
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Uh oh!
There was an error while loading. Please reload this page.
Conversation
class TestRequiredOptional:
    """Unit tests for types.RequiredOptional construction and validation.

    NOTE(review): reconstructed from a whitespace-mangled diff view; `types`
    is the project module (gitlab.types), not the stdlib `types` module.
    """

    def test_requiredoptional_empty(self):
        # A bare RequiredOptional has falsy (empty) attribute tuples.
        b = types.RequiredOptional()
        assert not b.required
        assert not b.optional
        assert not b.exclusive

    def test_requiredoptional_values_no_keywords(self):
        # Positional order is: required, optional, exclusive.
        b = types.RequiredOptional(
            ("required1", "required2"),
            ("optional1", "optional2"),
            ("exclusive1", "exclusive2"),
        )
        assert b.required == ("required1", "required2")
        assert b.optional == ("optional1", "optional2")
        assert b.exclusive == ("exclusive1", "exclusive2")

    def test_requiredoptional_values_keywords(self):
        # Keyword construction is order-independent.
        b = types.RequiredOptional(
            exclusive=("exclusive1", "exclusive2"),
            optional=("optional1", "optional2"),
            required=("required1", "required2"),
        )
        assert b.required == ("required1", "required2")
        assert b.optional == ("optional1", "optional2")
        assert b.exclusive == ("exclusive1", "exclusive2")

    def test_validate_attrs_required(self):
        # Passes when the required key is present...
        data = {"required1": 1, "optional2": 2}
        rq = types.RequiredOptional(required=("required1",))
        rq.validate_attrs(data=data)
        # ...and raises when it is missing.
        data = {"optional1": 1, "optional2": 2}
        with pytest.raises(AttributeError, match="Missing attributes: required1"):
            rq.validate_attrs(data=data)

    def test_validate_attrs_exclusive(self):
        # Passes when exactly one exclusive key is present...
        data = {"exclusive1": 1, "optional1": 1}
        rq = types.RequiredOptional(exclusive=("exclusive1", "exclusive2"))
        rq.validate_attrs(data=data)
        # ...and raises when more than one is supplied.
        data = {"exclusive1": 1, "exclusive2": 2, "optional1": 1}
        with pytest.raises(
            AttributeError,
            match="Provide only one of these attributes: exclusive1, exclusive2",
        ):
            rq.validate_attrs(data=data)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Some time ago when I was rewriting from unittest to pytest I was trying to get rid of as many test classes as possible, would be super happy to see even less of them :D Would this work? Then we just give them more descriptive names and split them per one type of assert, e.g. something like this:
class TestRequiredOptional:
    """Reviewer-suggested variant of the RequiredOptional tests.

    Each test covers exactly one assertion concern (success and raising
    paths are split into separate tests) so every case always runs.
    NOTE(review): reconstructed from a mangled GitHub suggestion diff; the
    deleted side of the diff duplicated the original class and is omitted.
    """

    def test_required_optional_empty(self):
        rq = types.RequiredOptional()
        assert not rq.required
        assert not rq.optional
        assert not rq.exclusive

    def test_required_optional_values_no_keywords(self):
        # Positional order is: required, optional, exclusive.
        rq = types.RequiredOptional(
            ("required1", "required2"),
            ("optional1", "optional2"),
            ("exclusive1", "exclusive2"),
        )
        assert rq.required == ("required1", "required2")
        assert rq.optional == ("optional1", "optional2")
        assert rq.exclusive == ("exclusive1", "exclusive2")

    def test_required_optional_values_with_keywords(self):
        # Keyword construction is order-independent.
        rq = types.RequiredOptional(
            exclusive=("exclusive1", "exclusive2"),
            optional=("optional1", "optional2"),
            required=("required1", "required2"),
        )
        assert rq.required == ("required1", "required2")
        assert rq.optional == ("optional1", "optional2")
        assert rq.exclusive == ("exclusive1", "exclusive2")

    def test_validate_attrs_with_matching_required(self):
        # Validation succeeds when the required key is present.
        data = {"required1": 1, "optional2": 2}
        rq = types.RequiredOptional(required=("required1",))
        rq.validate_attrs(data=data)

    def test_validate_attrs_with_missing_required_raises(self):
        data = {"optional1": 1, "optional2": 2}
        rq = types.RequiredOptional(required=("required1",))
        with pytest.raises(AttributeError, match="Missing attributes: required1"):
            rq.validate_attrs(data=data)

    def test_validate_attrs_with_matching_exclusive(self):
        # Validation succeeds when exactly one exclusive key is present.
        data = {"exclusive1": 1, "optional1": 1}
        rq = types.RequiredOptional(exclusive=("exclusive1", "exclusive2"))
        rq.validate_attrs(data=data)

    def test_validate_attrs_with_extra_exclusive_raises(self):
        data = {"exclusive1": 1, "exclusive2": 2, "optional1": 1}
        rq = types.RequiredOptional(exclusive=("exclusive1", "exclusive2"))
        with pytest.raises(
            AttributeError,
            match="Provide only one of these attributes: exclusive1, exclusive2",
        ):
            rq.validate_attrs(data=data)
JohnVillalovos — Jun 1, 2022 • edited
Loading Uh oh!
There was an error while loading.Please reload this page.
edited
Uh oh!
There was an error while loading.Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others.Learn more.
Some time ago when I was rewriting from unittest to pytest I was trying to get rid of as many test classes as possible, would be super happy to see even less of them :D Would this work? Then we just give them more descriptive names and split them per one type of assert, e.g. something like this:
@nejch Thanks for the review!
I will admit that I disagree with this. I prefer the testclass approach as it contains the test cases for a data-type (in this case) into one class. The benefits for this is that all tests for the data-type should be contained within this test class. If in the future the data-type was moved to a different module it will be relatively easy to move the test class to a different test module.
The downside to me of not using test classes is now tests can be added anywhere in the file and tests that are completely un-related to each other can be interspersed throughout the file.
As far as more descriptive names, That can be done inside the test class without issue.
But I would like to learn more on what benefit is gained by not using test classes. As willing to have my opinions changed 😊
Thanks.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others.Learn more.
Hehe, not sure I want to have a philosophical debate on test classes vs modules here so I'm fine to also merge this as we have a few others in other places 😄 But from my perspective with pytest, since it doesn't use unittest's setup/teardown, the only benefit of classes is namespacing, and for that I prefer modules, so if there was any moving I'd just put them in a separate file I think.
It's also how I see most pytest projects do it, so it's probably more natural for people who know pytest. We already tend to do test module per resource, since tests are longer and trying to do 1:1 module.py<->test_module.py ratio would result in really long test files sometimes.
IMO it would also be quite a task to switch our functions to methods properly now that our suite has grown:
```
(.venv) nejc@zbook-fury:~/repos/python-gitlab$ grep -r '^    def test_' tests/ | wc -l
36
(.venv) nejc@zbook-fury:~/repos/python-gitlab$ grep -r '^def test_' tests/ | wc -l
579
```
I'd still prefer to see 1 type of assert per test to ensure they all run, as I suggested above, at least in unit tests — e.g. when testing a raise after asserting on success — but I think we also have quite a few of these in our tests, so I can collect those for a follow-up later.
JohnVillalovos — Jun 5, 2022 • edited
Loading Uh oh!
There was an error while loading.Please reload this page.
edited
Uh oh!
There was an error while loading.Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others.Learn more.
@nejch Thanks! I for sure am not suggesting we move everything to classes. Just sometimes I prefer classes.
But if do a module per area of testing, how is the file naming typically done for the test module?
Say we have a file foo.py, and inside of it we have two gigantic classes we want to test: HugeClass and GiantClass. How would you name the two test modules? Would there be a directory foo with two test files under it? Or two test files at the root test level named test_foo_HugeClass.py and test_foo_GiantClass.py? Something else?
Thanks.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others.Learn more.
I don't have a hard requirement/guideline for that, but either works for me depending on what gets a reasonable size IMO (just with snake case everywhere) :)
For example, we have mixins.py which is a single file, but we have a whole mixins/ test dir where we have multiple modules testing different aspects of it, and you know exactly what aspect is being tested right away before even having to open the file (assuming they're clearly named).
For the client, we still use the opposite (a few root-level files) approach, but in this particular case that means we get up to almost 1k lines and there's a bit of scrolling to get to a test case.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others.Learn more.
@nejch Thanks!
No description provided.