diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..776b65b --- /dev/null +++ b/404.html @@ -0,0 +1,623 @@ + + + +
+ + + + + + + + + + + + + + + + + + +In Python, with
statement context managers provide an extremely useful interface to execute code inside a given "runtime context." This context can define consistent, failsafe setup and teardown behavior. For example, Python's built-in file objects provide a context manager interface to ensure the underlying file resource is opened and closed cleanly, without the caller having to explicitly deal with it:
with open("hello-world.txt", "r") as f:
+ contents = f.read()
+
You can use Decoy to mock out your dependencies that should provide a context manager interface.
+Using the contextlib module, you can decorate a generator function or method to turn its yielded value into a context manager. This is a great API, and one that Decoy is well-suited to mock. To mock a generator function context manager, use decoy.Stub.then_enter_with.
+import contextlib
+from my_module.core import Core
+from my_module.config import Config, ConfigLoader
+
+def test_loads_config(decoy: Decoy) -> None:
+ """It should load config from a ConfigLoader dependency.
+
+ In this example, we know we're going to read/write config
+ to/from an external source, like the filesystem. So we want to
+ implement this dependency as a context manager to ensure
+ resource cleanup.
+ """
+ config_loader = decoy.mock(ConfigLoader)
+ config = decoy.mock(Config)
+
+ subject = Core(config_loader=config_loader)
+
+ decoy.when(config_loader.load()).then_enter_with(config)
+ decoy.when(config.read("some_flag")).then_return(True)
+
+ result = subject.get_config("some_flag")
+
+ assert result is True
+
From this test, we could sketch out the following dependency APIs...
+# config.py
+import contextlib
+from typing import Generator
+
+class Config:
+ def read(self, name: str) -> bool:
+ ...
+
+class ConfigLoader:
+ @contextlib.contextmanager
+ def load(self) -> Generator[Config, None, None]:
+ ...
+
...along with our test subject's implementation to pass the test...
+# core.py
+from .config import Config, ConfigLoader
+
+class Core:
+ def __init__(self, config_loader: ConfigLoader) -> None:
+ self._config_loader = config_loader
+
+ def get_config(self, name: str) -> bool:
+ with self._config_loader.load() as config:
+ return config.read(name)
+
A context manager is simply an object with both __enter__
and __exit__
methods defined. Decoy mocks have both these methods defined, so they are compatible with the with
statement. In the author's opinion, tests that mock __enter__
and __exit__
(or any double-underscore method) are harder to read and understand than tests that do not, so generator-based context managers should be preferred where applicable.
Using our earlier example, maybe you'd prefer to use a single Config
dependency to both load the configuration resource and read values.
import contextlib
+from my_module.core import Core
+from my_module.config import Config, ConfigLoader
+
+def test_loads_config(decoy: Decoy) -> None:
+ """It should load config from a Config dependency."""
+ config = decoy.mock(Config)
+ subject = Core(config=config)
+
+ def _handle_enter() -> Config:
+ """Ensure `read` only works if context is entered."""
+ decoy.when(config.read("some_flag")).then_return(True)
+ return config
+
+ def _handle_exit() -> None:
+ """Ensure test fails if subject calls `read` after exit."""
+ decoy.when(
+ config.read("some_flag")
+ ).then_raise(AssertionError("Context manager was exited"))
+
+ decoy.when(config.__enter__()).then_do(_handle_enter)
+ decoy.when(config.__exit__(None, None, None)).then_do(_handle_exit)
+
+ result = subject.get_config("some_flag")
+
+ assert result is True
+
From this test, our dependency APIs would be...
+# config.py
+from __future__ import annotations
+from types import TracebackType
+from typing import Type, Optional
+
+class Config:
+ def __enter__(self) -> Config:
+ ...
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_value: Optional[BaseException],
+ traceback: Optional[TracebackType],
+ ) -> Optional[bool]:
+ ...
+
+ def read(self, name: str) -> bool:
+ ...
+
...along with our test subject's implementation to pass the test...
+# core.py
+from .config import Config
+
+class Core:
+ def __init__(self, config: Config) -> None:
+ self._config = config
+
+ def get_config(self, name: str) -> bool:
+ with self._config as loaded_config:
+ return loaded_config.read(name)
+
Decoy is also compatible with mocking the async __aenter__
and __aexit__
methods of async context managers.
import pytest
+import contextlib
+from my_module.core import Core
+from my_module.config import Config, ConfigLoader
+
+async def test_loads_config(decoy: Decoy) -> None:
+ """It should load config from a Config dependency."""
+ config = decoy.mock(Config)
+ subject = Core(config=config)
+
+ async def _handle_enter() -> Config:
+ """Ensure `read` only works if context is entered."""
+ decoy.when(config.read("some_flag")).then_return(True)
+ return config
+
+ async def _handle_exit() -> None:
+ """Ensure test fails if subject calls `read` after exit."""
+ decoy.when(
+ config.read("some_flag")
+ ).then_raise(AssertionError("Context manager was exited"))
+
+ decoy.when(await config.__aenter__()).then_do(_handle_enter)
+    decoy.when(await config.__aexit__(None, None, None)).then_do(_handle_exit)
+
+ result = await subject.get_config("some_flag")
+
+ assert result is True
+
This test spits out the following APIs and implementations...
+# config.py
+from __future__ import annotations
+from types import TracebackType
+from typing import Type, Optional
+
+class Config:
+ async def __aenter__(self) -> Config:
+ ...
+
+ async def __aexit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_value: Optional[BaseException],
+ traceback: Optional[TracebackType],
+ ) -> Optional[bool]:
+ ...
+
+ def read(self, name: str) -> bool:
+ ...
+
...along with our test subject's implementation to pass the test...
+# core.py
+from .config import Config
+
+class Core:
+ def __init__(self, config: Config) -> None:
+ self._config = config
+
+ async def get_config(self, name: str) -> bool:
+ async with self._config as loaded_config:
+ return loaded_config.read(name)
+
Python property attributes provide an interface for creating read-only properties and properties with getters, setters, and deleters. You can use Decoy to stub properties and verify calls to property setters and deleters.
+Unlike mock method calls - which have a default return value of None
- Decoy's default return value for attribute access is another mock. So you don't need to configure anything explicitly if you need a @property
getter to return another mock; Decoy will do this for you.
class SubDep:
+ ...
+
+class Dep:
+ @property
+ def sub(self) -> SubDep:
+ ...
+
+def test(decoy: Decoy) -> None:
+ dep = decoy.mock(cls=Dep) # <- mock of class Dep
+ sub = dep.sub # <- mock of class SubDep
+ ...
+
If you would like to stub a return value for a property that is different than the default behavior, simply use the property itself as your rehearsal.
+dependency = decoy.mock(name="dependency")
+
+decoy.when(
+ dependency.some_property # <- "rehearsal" of a property getter
+).then_return(42)
+
+assert dependency.some_property == 42
+
You can also configure any other behavior, like raising an error.
+dependency = decoy.mock(name="dependency")
+
+decoy.when(
+ dependency.some_property
+).then_raise(RuntimeError("oh no"))
+
+with pytest.raises(RuntimeError, match="oh no"):
+ dependency.some_property
+
While you cannot stub a return value for a setter or deleter, you can stub a raise
or a side effect by combining decoy.Decoy.when with decoy.Decoy.prop.
The prop
method allows you to create rehearsals of setters and deleters. Use decoy.Prop.set to create a setter rehearsal, and decoy.Prop.delete to create a deleter rehearsal.
dependency = decoy.mock(name="dependency")
+
+decoy.when(
+ decoy.prop(dependency.some_property).set(42)
+).then_raise(RuntimeError("oh no"))
+
+decoy.when(
+ decoy.prop(dependency.some_property).delete()
+).then_raise(RuntimeError("what a disaster"))
+
+with pytest.raises(RuntimeError, match="oh no"):
+ dependency.some_property = 42
+
+with pytest.raises(RuntimeError, match="what a disaster"):
+ del dependency.some_property
+
Tip
+You cannot use decoy.Stub.then_return with property setters and deleters, because set and delete expressions do not return a value in Python.
+You can verify calls to property setters and deleters by combining decoy.Decoy.verify with decoy.Decoy.prop, the same way you would configure a stub.
+Use this feature sparingly! If you're designing a dependency that triggers a side-effect, consider using a regular method rather than a property setter/deleter. It'll probably make your code easier to read and reason with.
+Mocking and verifying property setters and deleters is most useful for testing code that needs to interact with older or legacy dependencies that would be prohibitively expensive to redesign.
+Tip
+You cannot verify
getters. The verify
method is for verifying side-effects, and it is the opinion of the author that property getters should not trigger side-effects. Getter-triggered side effects are confusing and do not communicate the design intent of a system.
Use decoy.Prop.set to create a setter rehearsal to use in decoy.Decoy.verify.
+dependency = decoy.mock(name="dependency")
+
+dependency.some_property = 42
+
+decoy.verify(
+ decoy.prop(dependency.some_property).set(42) # <- "rehearsal" of a property setter
+)
+
Use decoy.Prop.delete to create a deleter rehearsal to use in decoy.Decoy.verify.
+dependency = decoy.mock(name="dependency")
+
+del dependency.some_property
+
+decoy.verify(
+ decoy.prop(dependency.some_property).delete() # <- "rehearsal" of a property deleter
+)
+
In this example, we're developing a Core
unit, with a Config
dependency. We want to test that we get, set, and delete the port
property of the Config
dependency when various methods of Core
are used.
from decoy import Decoy
+
+
+class InvalidPortValue(ValueError):
+ """Exception raised when a given port value is invalid."""
+
+
+class Config:
+ """Config dependency."""
+
+ @property
+ def port(self) -> int:
+ ...
+
+ @port.setter
+    def port(self, value: int) -> None:
+ ...
+
+ @port.deleter
+ def port(self) -> None:
+ ...
+
+
+class Core:
+ """Core test subject."""
+
+ def __init__(self, config: Config) -> None:
+ self._config = config
+
+ def get_port(self) -> int:
+ return self._config.port
+
+ def set_port(self, port: int) -> None:
+ try:
+ self._config.port = port
+ except ValueError as e:
+ raise InvalidPortValue(str(e)) from e
+
+ def reset_port(self) -> None:
+ del self._config.port
+
+
+def test_gets_port(decoy: Decoy) -> None:
+ """Core should get the port number from its Config dependency."""
+ config = decoy.mock(cls=Config)
+ subject = Core(config=config)
+
+ decoy.when(
+ config.port # <- "rehearsal" of a property getter
+ ).then_return(42)
+
+ result = subject.get_port()
+
+ assert result == 42
+
+
+def test_rejects_invalid_port(decoy: Decoy) -> None:
+ """Core should re-raise if the port number is set to an invalid value."""
+ config = decoy.mock(cls=Config)
+ subject = Core(config=config)
+
+ decoy.when(
+ decoy.prop(config.port).set(9001) # <- "rehearsal" of a property setter
+ ).then_raise(ValueError("there's no way that can be right"))
+
+    with pytest.raises(InvalidPortValue, match="there's no way"):
+ subject.set_port(9001)
+
+
+def test_sets_port(decoy: Decoy) -> None:
+ """Core should set the port number in its Config dependency."""
+ config = decoy.mock(cls=Config)
+ subject = Core(config=config)
+
+ subject.set_port(101)
+
+ decoy.verify(
+ decoy.prop(config.port).set(101) # <- "rehearsal" of a property setter
+ )
+
+def test_resets_port(decoy: Decoy) -> None:
+ """Core should delete the port number in its Config dependency."""
+ config = decoy.mock(cls=Config)
+ subject = Core(config=config)
+
+ subject.reset_port()
+
+ decoy.verify(
+ decoy.prop(config.port).delete() # <- "rehearsal" of a property deleter
+ )
+
decoy.Decoy
+
+
+#Decoy mock factory and state container.
+You should create a new Decoy instance before each test and call
+reset
after each test. If you use the
+decoy
pytest fixture, this is done
+automatically. See the setup guide for more details.
Example
+decoy = Decoy()
+
+# test your subject
+...
+
+decoy.reset()
+
decoy.Decoy.mock(*, cls=None, func=None, name=None, is_async=False)
+
+#Create a mock. See the mock creation guide for more details.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
cls |
+
+ Optional[Any]
+ |
+
+
+
+ A class definition that the mock should imitate. + |
+
+ None
+ |
+
func |
+
+ Optional[Any]
+ |
+
+
+
+ A function definition the mock should imitate. + |
+
+ None
+ |
+
name |
+
+ Optional[str]
+ |
+
+
+
+ A name to use for the mock. If you do not use
+ |
+
+ None
+ |
+
is_async |
+
+ bool
+ |
+
+
+
+ Force the returned spy to be asynchronous. This argument
+only applies if you don't use |
+
+ False
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Any
+ |
+
+
+
+ A spy typecast as the object it's imitating, if any. + |
+
Example
+def test_get_something(decoy: Decoy):
+ db = decoy.mock(cls=Database)
+ # ...
+
decoy.Decoy.prop(_rehearsal_result)
+
+#Create property setter and deleter rehearsals.
+See property mocking guide for more details.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_rehearsal_result |
+
+ ReturnT
+ |
+
+
+
+ The property to mock, for typechecking. + |
+ + required + | +
Returns:
+Type | +Description | +
---|---|
+ Prop[ReturnT]
+ |
+
+
+
+ A prop rehearser on which you can call |
+
+ Prop[ReturnT]
+ |
+
+
+
+
|
+
decoy.Decoy.reset()
+
+#Reset all mock state.
+This method should be called after every test to ensure spies and stubs
+don't leak between tests. The decoy
fixture
+provided by the pytest plugin will call reset
automatically.
The reset
method may also trigger warnings if Decoy detects any questionable
+mock usage. See decoy.warnings for more details.
decoy.Decoy.verify(*_rehearsal_results, times=None, ignore_extra_args=False)
+
+#Verify a mock was called using one or more rehearsals.
+See verification usage guide for more details.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_rehearsal_results |
+
+ Any
+ |
+
+
+
+ The return value of rehearsals, unused except +to determine how many rehearsals to verify. + |
+
+ ()
+ |
+
times |
+
+ Optional[int]
+ |
+
+
+
+ How many times the call should appear. If |
+
+ None
+ |
+
ignore_extra_args |
+
+ bool
+ |
+
+
+
+ Allow the rehearsal to specify fewer arguments than +the actual call. Decoy will compare and match any given arguments, +ignoring unspecified arguments. + |
+
+ False
+ |
+
Raises:
+Type | +Description | +
---|---|
+ VerifyError
+ |
+
+
+
+ The verification was not satisfied. + |
+
Example
+def test_create_something(decoy: Decoy):
+ gen_id = decoy.mock(func=generate_unique_id)
+
+ # ...
+
+ decoy.verify(gen_id("model-prefix_"))
+
Note
+A "rehearsal" is an actual call to the test fake. The fact that
+the call is written inside verify
is purely for typechecking and
+API sugar. Decoy will pop the last call(s) to any fake off its
+call stack, which will end up being the call inside verify
.
decoy.Decoy.when(_rehearsal_result, *, ignore_extra_args=False)
+
+#Create a Stub
configuration using a rehearsal call.
See stubbing usage guide for more details.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
_rehearsal_result |
+
+ ReturnT
+ |
+
+
+
+ The return value of a rehearsal, used for typechecking. + |
+ + required + | +
ignore_extra_args |
+
+ bool
+ |
+
+
+
+ Allow the rehearsal to specify fewer arguments than +the actual call. Decoy will compare and match any given arguments, +ignoring unspecified arguments. + |
+
+ False
+ |
+
Returns:
+Type | +Description | +
---|---|
+ Stub[ReturnT]
+ |
+
+
+
+ A stub to configure using |
+
+ Stub[ReturnT]
+ |
+
+
+
+
+ |
+
+ Stub[ReturnT]
+ |
+
+
+
+ or |
+
Example
+db = decoy.mock(cls=Database)
+decoy.when(db.exists("some-id")).then_return(True)
+
Note
+The "rehearsal" is an actual call to the test fake. Because the
+call is written inside when
, Decoy is able to infer that the call
+is a rehearsal for stub configuration purposes rather than a call
+from the code-under-test.
decoy.Stub
+
+
+#
+ Bases: Generic[ReturnT]
A rehearsed Stub that can be used to configure mock behaviors.
+See stubbing usage guide for more details.
+ + + + +decoy.Stub.then_do(action)
+
+#Configure the stub to trigger an action.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
action |
+
+ Callable[..., Union[ReturnT, Coroutine[Any, Any, ReturnT]]]
+ |
+
+
+
+ The function to call. Called with whatever arguments
+are actually passed to the stub. May be an |
+ + required + | +
Raises:
+Type | +Description | +
---|---|
+ MockNotAsyncError
+ |
+
+
+
+
|
+
decoy.Stub.then_enter_with(value)
+
+#Configure the stub to return a value wrapped in a context manager.
+The wrapping context manager is compatible with both the synchronous and +asynchronous context manager interfaces.
+See the context manager usage guide +for more details.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
value |
+
+ ContextValueT
+ |
+
+
+
+ A return value to wrap in a ContextManager. + |
+ + required + | +
decoy.Stub.then_raise(error)
+
+#Configure the stub to raise an error.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
error |
+
+ Exception
+ |
+
+
+
+ The error to raise. + |
+ + required + | +
Note
+Setting a stub to raise will prevent you from writing new
+rehearsals, because they will raise. If you need to make more calls
+to when
, you'll need to wrap your rehearsal
+in a try
.
decoy.Stub.then_return(*values)
+
+#Configure the stub to return value(s).
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
*values |
+
+ ReturnT
+ |
+
+
+
+ Zero or more return values. Multiple values will result + in different return values for subsequent calls, with the + last value latching in once all other values have returned. + |
+
+ ()
+ |
+
decoy.Prop
+
+
+#
+ Bases: Generic[ReturnT]
Rehearsal creator for mocking property setters and deleters.
+See property mocking guide for more details.
+ + + + +decoy.Prop.delete()
+
+#decoy.Prop.set(value)
+
+#Create a property setter rehearsal.
+By wrapping set
in a call to when
or
+verify
, you can stub or verify a call
+to a property setter.
Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
value |
+
+ ReturnT
+ |
+
+
+
+    The value to pass to the property setter.
+ + required + | +
Example
+some_obj = decoy.mock()
+some_obj.prop = 42
+decoy.verify(decoy.prop(some_obj.prop).set(42))
+
decoy.matchers
+
+
+#Matcher helpers.
+A "matcher" is a class with an __eq__
method defined. Use them anywhere
+in your test where you would use an actual value for equality (==
) comparison.
Matchers help you loosen assertions where strict adherence to an exact value +is not relevant to what you're trying to test. +See the matchers guide for more details.
+Example
+from decoy import Decoy, matchers
+
+# ...
+
+def test_logger_called(decoy: Decoy):
+ # ...
+ decoy.verify(
+ logger.log(msg=matchers.StringMatching("hello"))
+ )
+
Note
+Identity comparisons (is
) will not work with matchers. Decoy only uses
+equality comparisons (==
) for stubbing and verification.
decoy.matchers.Anything()
+
+#Match anything except None.
+Example
+assert "foobar" == Anything()
+assert None != Anything()
+
decoy.matchers.Captor()
+
+#Match anything, capturing its value.
+The last captured value will be set to captor.value
. All captured
+values will be placed in the captor.values
list, which can be
+helpful if a captor needs to be triggered multiple times.
Example
+captor = Captor()
+assert "foobar" == captor
+print(captor.value) # "foobar"
+print(captor.values) # ["foobar"]
+
decoy.matchers.DictMatching(values)
+
+#Match any dictionary with the passed in keys / values.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
values |
+
+ Mapping[str, Any]
+ |
+
+
+
+ Keys and values to check. + |
+ + required + | +
Example
+value = {"hello": "world", "goodbye": "so long"}
+assert value == matchers.DictMatching({"hello": "world"})
+
decoy.matchers.ErrorMatching(error, match=None)
+
+#Match any error matching an Exception type and optional message matcher.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
error |
+
+ Type[ErrorT]
+ |
+
+
+
+ Exception type to match against. + |
+ + required + | +
match |
+
+ Optional[str]
+ |
+
+
+
+ Pattern to check against; will be compiled into an re.Pattern. + |
+
+ None
+ |
+
Example
+assert ValueError("oh no!") == ErrorMatching(ValueError)
+assert ValueError("oh no!") == ErrorMatching(ValueError, match="no")
+
decoy.matchers.HasAttributes(attributes)
+
+#Match anything with the passed in attributes.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
attributes |
+
+ Mapping[str, Any]
+ |
+
+
+
+ Attribute values to check. + |
+ + required + | +
Example
+@dataclass
+class HelloWorld:
+ hello: str = "world"
+    goodbye: str = "so long"
+
+assert HelloWorld() == matchers.HasAttributes({"hello": "world"})
+
decoy.matchers.IsA(match_type, attributes=None)
+
+#Match anything that satisfies the passed in type.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
match_type |
+
+ type
+ |
+
+
+
+ Type to match. + |
+ + required + | +
attributes |
+
+ Optional[Mapping[str, Any]]
+ |
+
+
+
+ Optional set of attributes to match + |
+
+ None
+ |
+
Example
+assert "foobar" == IsA(str)
+assert datetime.now() == IsA(datetime)
+assert 42 == IsA(int)
+
+@dataclass
+class HelloWorld:
+ hello: str = "world"
+    goodbye: str = "so long"
+
+assert HelloWorld() == IsA(HelloWorld, {"hello": "world"})
+
decoy.matchers.IsNot(value)
+
+#Match anything that isn't the passed in value.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
value |
+
+ object
+ |
+
+
+
+ Value to check against. + |
+ + required + | +
Example
+assert "foobar" == IsNot("bazquux")
+assert 42 == IsNot("the question")
+assert 1 != IsNot(1)
+
decoy.matchers.StringMatching(match)
+
+#Match any string matching the passed in pattern.
+ + + +Parameters:
+Name | +Type | +Description | +Default | +
---|---|---|---|
match |
+
+ str
+ |
+
+
+
+ Pattern to check against; will be compiled into an re.Pattern. + |
+ + required + | +
Example
+assert "foobar" == StringMatching("bar")
+assert "foobar" != StringMatching("^bar")
+
decoy.errors
+
+
+#Errors raised by Decoy.
+See the errors guide for more details.
+ + + +decoy.errors.MissingRehearsalError
+
+
+#
+ Bases: ValueError
An error raised when a Decoy method is called without rehearsal(s).
+This error is raised if you use when
,
+verify
, or prop
incorrectly
+in your tests. When using async/await, this error can be triggered if you
+forget to include await
with your rehearsal.
See the MissingRehearsalError guide for more details.
+ + + + +decoy.errors.MockNameRequiredError
+
+
+#
+ Bases: ValueError
An error raised if a name is not provided for a mock.
+See the MockNameRequiredError guide for more details.
+ + + + +decoy.errors.MockNotAsyncError
+
+
+#
+ Bases: TypeError
An error raised when an asynchronous function is used with a synchronous mock.
+This error is raised if you pass an async def
function
+to a synchronous stub's then_do
method.
+See the MockNotAsyncError guide for more details.
decoy.errors.VerifyError
+
+
+#
+ Bases: AssertionError
An error raised when actual calls do not match rehearsals given to verify
.
See spying with verify for more details.
+ + + +Attributes:
+Name | +Type | +Description | +
---|---|---|
rehearsals |
+
+ Sequence[VerifyRehearsal]
+ |
+
+
+
+ Rehearsals that were being verified. + |
+
calls |
+
+ Sequence[SpyEvent]
+ |
+
+
+
+ Actual calls to the mock(s). + |
+
times |
+
+ Optional[int]
+ |
+
+
+
+ The expected number of calls to the mock, if any. + |
+
decoy.warnings
+
+
+#Warnings raised by Decoy.
+See the warnings guide for more details.
+ + + +decoy.warnings.DecoyWarning
+
+
+#
+ Bases: UserWarning
Base class for Decoy warnings.
+ + +decoy.warnings.IncorrectCallWarning
+
+
+#
+ Bases: DecoyWarning
A warning raised if a Decoy mock with a spec is called incorrectly.
+If a call to a Decoy mock is incorrect according to inspect.signature
,
+this warning will be raised.
+See the IncorrectCallWarning guide for more details.
decoy.warnings.MiscalledStubWarning
+
+
+#
+ Bases: DecoyWarning
A warning when a configured Stub is called with non-matching arguments.
+This warning is raised if a mock is both:
+- Configured using when
+- Called with arguments that do not match any of its configured rehearsals
See the MiscalledStubWarning guide for more details.
+ + + +Attributes:
+Name | +Type | +Description | +
---|---|---|
rehearsals |
+
+ Sequence[SpyRehearsal]
+ |
+
+
+
+ The mocks's configured rehearsals. + |
+
calls |
+
+ Sequence[SpyEvent]
+ |
+
+
+
+ Actual calls to the mock. + |
+
decoy.warnings.MissingSpecAttributeWarning
+
+
+#
+ Bases: DecoyWarning
A warning raised if a Decoy mock with a spec is used with a missing attribute.
+This will become an error in the next major version of Decoy. +See the MissingSpecAttributeWarning guide for more details.
+ + +decoy.warnings.RedundantVerifyWarning
+
+
+#
+ Bases: DecoyWarning
A warning when a mock is redundantly checked with verify
.
A verify
assertion is redundant if:
See the RedundantVerifyWarning guide for more details.
+ + + + +decoy.pytest_plugin
+
+
+#Pytest plugin to setup and teardown a Decoy instance.
+The plugin will be registered with pytest when you install Decoy. It adds a +fixture without modifying any other pytest behavior. Its usage is optional +but highly recommended.
+ + + +decoy.pytest_plugin.decoy()
+
+#Get a decoy.Decoy container and reset it after the test.
+This function is function-scoped pytest fixture that will be +automatically inserted by the plugin.
+Example
+def test_my_thing(decoy: Decoy) -> None:
+ my_fake_func = decoy.mock()
+ # ...
+
{"use strict";/*!
+ * escape-html
+ * Copyright(c) 2012-2013 TJ Holowaychuk
+ * Copyright(c) 2015 Andreas Lubbe
+ * Copyright(c) 2015 Tiancheng "Timothy" Gu
+ * MIT Licensed
+ */var Ha=/["'&<>]/;Un.exports=$a;function $a(e){var t=""+e,r=Ha.exec(t);if(!r)return t;var o,n="",i=0,s=0;for(i=r.index;i