/*
 * Memory Pool implementation logic.
 */

#include "git-compat-util.h"
#include "mem-pool.h"

#define BLOCK_GROWTH_SIZE (1024 * 1024 - sizeof(struct mp_block))
/*
 * The inner union is an approximation for C11's max_align_t, and the
 * struct + offsetof computes _Alignof. This can all just be replaced
 * with _Alignof(max_align_t) if/when C11 is part of the baseline.
 * Note that _Alignof(X) need not be the same as sizeof(X); it's only
 * required to be a (possibly trivial) factor. They are the same for
 * most architectures, but m68k for example has only 2-byte alignment
 * for its 4-byte and 8-byte types, so using sizeof would waste space.
 *
 * Add more types to the union if the current set is insufficient.
 */
struct git_max_alignment {
	char unalign;
	union {
		uintmax_t max_align_uintmax;
		void *max_align_pointer;
	} aligned;
};
#define GIT_MAX_ALIGNMENT offsetof(struct git_max_alignment, aligned)
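
/*
 * For illustration: on a typical LP64 platform both union members are
 * 8 bytes with 8-byte alignment, so GIT_MAX_ALIGNMENT works out to 8
 * and the rounding in mem_pool_alloc() below behaves like
 *
 *	DIV_ROUND_UP(5, 8) * 8   =  8
 *	DIV_ROUND_UP(13, 8) * 8  = 16
 *	DIV_ROUND_UP(16, 8) * 8  = 16
 *
 * The concrete value is platform-dependent; the offsetof() trick above
 * keeps it in sync with the strictest member of the union.
 */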
/*
 * Allocate a new mp_block and insert it after the block specified in
 * `insert_after`. If `insert_after` is NULL, then insert block at the
 * head of the linked list.
 */
static struct mp_block *mem_pool_alloc_block(struct mem_pool *pool,
					     size_t block_alloc,
					     struct mp_block *insert_after)
{
	struct mp_block *p;

	pool->pool_alloc += sizeof(struct mp_block) + block_alloc;
	p = xmalloc(st_add(sizeof(struct mp_block), block_alloc));

	p->next_free = (char *)p->space;
	p->end = p->next_free + block_alloc;

	if (insert_after) {
		p->next_block = insert_after->next_block;
		insert_after->next_block = p;
	} else {
		p->next_block = pool->mp_block;
		pool->mp_block = p;
	}

	return p;
}
void mem_pool_init(struct mem_pool *pool, size_t initial_size)
{
	memset(pool, 0, sizeof(*pool));
	pool->block_alloc = BLOCK_GROWTH_SIZE;

	if (initial_size > 0)
		mem_pool_alloc_block(pool, initial_size, NULL);
}
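
/*
 * A minimal usage sketch (hypothetical caller, not a real call site):
 * the pool itself can live on the stack or be embedded in another
 * struct, and mem_pool_discard() frees the blocks while leaving the
 * pool ready for further allocations without another mem_pool_init().
 *
 *	struct mem_pool pool;
 *	char *name;
 *
 *	mem_pool_init(&pool, 0);
 *	name = mem_pool_strdup(&pool, "example");
 *	mem_pool_discard(&pool, 0);
 *	name = mem_pool_strdup(&pool, "still usable");
 *
 * Note that discarding invalidates every pointer previously handed out
 * by the pool, as the blocks backing them are freed.
 */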
void mem_pool_discard(struct mem_pool *pool, int invalidate_memory)
{
	struct mp_block *block, *block_to_free;

	block = pool->mp_block;
	while (block)
	{
		block_to_free = block;
		block = block->next_block;

		if (invalidate_memory)
			memset(block_to_free->space, 0xDD, ((char *)block_to_free->end) - ((char *)block_to_free->space));

		free(block_to_free);
	}

	pool->mp_block = NULL;
	pool->pool_alloc = 0;
}
void *mem_pool_alloc(struct mem_pool *pool, size_t len)
{
	struct mp_block *p = NULL;
	void *r;

	len = DIV_ROUND_UP(len, GIT_MAX_ALIGNMENT) * GIT_MAX_ALIGNMENT;

	if (pool->mp_block &&
	    pool->mp_block->end - pool->mp_block->next_free >= len)
		p = pool->mp_block;
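
	/*
	 * The current head block (if any) cannot fit this request. A
	 * request of at least half the configured block size gets a
	 * dedicated block inserted behind the current head, so the head
	 * keeps serving small allocations; smaller requests instead
	 * start a fresh full-size head block.
	 */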
	if (!p) {
		if (len >= (pool->block_alloc / 2))
			p = mem_pool_alloc_block(pool, len, pool->mp_block);
		else
			p = mem_pool_alloc_block(pool, pool->block_alloc, NULL);
	}

	r = p->next_free;
	p->next_free += len;
	return r;
}
static char *mem_pool_strvfmt(struct mem_pool *pool, const char *fmt,
			      va_list ap)
{
	struct mp_block *block = pool->mp_block;
	char *next_free = block ? block->next_free : NULL;
	size_t available = block ? block->end - block->next_free : 0;
	va_list cp;
	int len, len2;
	size_t size;
	char *ret;

	va_copy(cp, ap);
	len = vsnprintf(next_free, available, fmt, cp);
	va_end(cp);
	if (len < 0)
		BUG("your vsnprintf is broken (returned %d)", len);

	size = st_add(len, 1); /* 1 for NUL */
	ret = mem_pool_alloc(pool, size);

	/* Shortcut; relies on mem_pool_alloc() not touching buffer contents. */
	if (ret == next_free)
		return ret;

	len2 = vsnprintf(ret, size, fmt, ap);
	if (len2 != len)
		BUG("your vsnprintf is broken (returns inconsistent lengths)");
	return ret;
}
char *mem_pool_strfmt(struct mem_pool *pool, const char *fmt, ...)
{
	va_list ap;
	char *ret;

	va_start(ap, fmt);
	ret = mem_pool_strvfmt(pool, fmt, ap);
	va_end(ap);
	return ret;
}
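
/*
 * Example (hypothetical caller and arguments): format a string directly
 * into pool-owned memory, so the result lives as long as the pool and
 * needs no separate free:
 *
 *	char *path = mem_pool_strfmt(&pool, "%s/%d", dir, id);
 *
 * When the current block already has enough room, mem_pool_strvfmt()
 * above formats in place and skips the second vsnprintf() pass.
 */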
void *mem_pool_calloc(struct mem_pool *pool, size_t count, size_t size)
{
	size_t len = st_mult(count, size);
	void *r = mem_pool_alloc(pool, len);

	memset(r, 0, len);
	return r;
}
char *mem_pool_strdup(struct mem_pool *pool, const char *str)
{
	size_t len = strlen(str) + 1;
	char *ret = mem_pool_alloc(pool, len);

	return memcpy(ret, str, len);
}
char *mem_pool_strndup(struct mem_pool *pool, const char *str, size_t len)
{
	char *p = memchr(str, '\0', len);
	size_t actual_len = (p ? p - str : len);
	char *ret = mem_pool_alloc(pool, actual_len+1);

	ret[actual_len] = '\0';
	return memcpy(ret, str, actual_len);
}
int mem_pool_contains(struct mem_pool *pool, void *mem)
{
	struct mp_block *p;

	/* Check if memory is allocated in a block */
	for (p = pool->mp_block; p; p = p->next_block)
		if ((mem >= ((void *)p->space)) &&
		    (mem < ((void *)p->end)))
			return 1;

	return 0;
}
void mem_pool_combine(struct mem_pool *dst, struct mem_pool *src)
{
	struct mp_block *p;

	/* Append the blocks from src to dst */
	if (dst->mp_block && src->mp_block) {
		/*
		 * src and dst have blocks, append
		 * blocks from src to dst.
		 */
		p = dst->mp_block;
		while (p->next_block)
			p = p->next_block;

		p->next_block = src->mp_block;
	} else if (src->mp_block) {
		/*
		 * src has blocks, dst is empty.
		 */
		dst->mp_block = src->mp_block;
	} else {
		/* src is empty, nothing to do. */
	}

	dst->pool_alloc += src->pool_alloc;
	src->pool_alloc = 0;
	src->mp_block = NULL;
}