bpo-40571: Make lru_cache(maxsize=None) more discoverable (GH-20019)

Raymond Hettinger 2020-05-11 17:00:53 -07:00 committed by GitHub
parent 4804b5b3df
commit 21cdb711e3
4 changed files with 57 additions and 1 deletion


@@ -26,6 +26,32 @@ function for the purposes of this module.
The :mod:`functools` module defines the following functions:

.. decorator:: cache(user_function)

   Simple lightweight unbounded function cache.  Sometimes called
   `"memoize" <https://en.wikipedia.org/wiki/Memoization>`_.

   Returns the same as ``lru_cache(maxsize=None)``, creating a thin
   wrapper around a dictionary lookup for the function arguments.  Because it
   never needs to evict old values, this is smaller and faster than
   :func:`lru_cache()` with a size limit.

   For example::

        @cache
        def factorial(n):
            return n * factorial(n-1) if n else 1

        >>> factorial(10)      # no previously cached result, makes 11 recursive calls
        3628800
        >>> factorial(5)       # just looks up the cached value
        120
        >>> factorial(12)      # makes two new recursive calls, the other 10 are cached
        479001600

   .. versionadded:: 3.9


.. decorator:: cached_property(func)

   Transform a method of a class into a property whose value is computed once
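Returning to the new cache() entry above: since it simply delegates to lru_cache(maxsize=None), the wrapper it returns also carries lru_cache's introspection helpers, cache_info() and cache_clear(). A brief sketch continuing the factorial example (the counts shown assume a fresh interpreter; this is an illustration, not part of the patch):

    from functools import cache

    @cache
    def factorial(n):
        return n * factorial(n-1) if n else 1

    factorial(10)                  # 11 misses: values for 0..10 are computed and stored
    factorial(5)                   # 1 hit: the value for 5 is already cached
    print(factorial.cache_info())  # CacheInfo(hits=1, misses=11, maxsize=None, currsize=11)
    factorial.cache_clear()        # empties the cache and resets the statistics
    print(factorial.cache_info())  # CacheInfo(hits=0, misses=0, maxsize=None, currsize=0)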


@@ -10,7 +10,7 @@
# See C source code for _functools credits/copyright
 __all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
-           'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce',
+           'total_ordering', 'cache', 'cmp_to_key', 'lru_cache', 'reduce',
            'TopologicalSorter', 'CycleError',
            'partial', 'partialmethod', 'singledispatch', 'singledispatchmethod',
            'cached_property']
@@ -888,6 +888,15 @@ def cache_clear():
    pass


################################################################################
### cache -- simplified access to the infinity cache
################################################################################

def cache(user_function, /):
    'Simple lightweight unbounded cache. Sometimes called "memoize".'
    return lru_cache(maxsize=None)(user_function)


################################################################################
### singledispatch() - single-dispatch generic function decorator
################################################################################
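To make the pass-through concrete: cache(f) is literally lru_cache(maxsize=None)(f), so @cache is applied without arguments or parentheses, and the resulting wrapper reports an unbounded maxsize. A small illustrative sketch (the square / via_* names are invented for this example, not part of the patch):

    from functools import cache, lru_cache

    def square(x):
        return x * x

    # Both wrappers below are unbounded caches over the same function.
    via_cache = cache(square)
    via_lru = lru_cache(maxsize=None)(square)

    print(via_cache(4), via_lru(4))        # 16 16
    print(via_cache.cache_info().maxsize)  # None  (unbounded)
    print(via_lru.cache_info().maxsize)    # None  (unbounded)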


@@ -1432,6 +1432,25 @@ def check_order_with_hash_seed(seed):
        self.assertEqual(run1, run2)


class TestCache:
    # This tests that the pass-through is working as designed.
    # The underlying functionality is tested in TestLRU.
    def test_cache(self):
        @self.module.cache
        def fib(n):
            if n < 2:
                return n
            return fib(n-1) + fib(n-2)
        self.assertEqual([fib(n) for n in range(16)],
            [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
        fib.cache_clear()
        self.assertEqual(fib.cache_info(),
            self.module._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))


class TestLRU:
    def test_lru(self):
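The hits=28, misses=16 figures asserted in the new test follow directly from the loop order: each n in range(16) is computed exactly once (16 misses), and every n >= 2 makes two recursive calls whose results were already cached by earlier iterations (2 × 14 = 28 hits). A standalone sketch of the same accounting outside the unittest harness (requires Python 3.9+, i.e. with this patch applied):

    from functools import cache

    @cache
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)

    [fib(n) for n in range(16)]
    info = fib.cache_info()

    assert info.misses == 16    # one miss per distinct argument 0..15
    assert info.hits == 2 * 14  # two cached lookups for every n >= 2
    assert info.currsize == 16
    print(info)                 # CacheInfo(hits=28, misses=16, maxsize=None, currsize=16)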


@@ -0,0 +1,2 @@
Added functools.cache() as a simpler, more discoverable way to access the
unbounded cache variant of lru_cache(maxsize=None).