#include "cache.h"
#include "split-index.h"
#include "ewah/ewok.h"

split-index: the writing part

prepare_to_write_split_index() does the major work, classifying
deleted, updated and added entries. write_link_extension() then just
writes it down.

Note that deleting an entry and then adding it back is recorded as
"entry X is deleted, entry X is added", not "entry X is replaced".
This is simpler, with small overhead: a replaced entry is stored
without its path, a new entry is stored with its path.

A note about unpack_trees() and the deduplication code inside
prepare_to_write_split_index(). Usually tracking updated/removed
entries via the read-cache API is enough. unpack_trees() manipulates
the index in a different way: it throws the entire source index out,
builds up a new one, copying/duplicating entries (using dup_entry)
from the source index over if necessary, then returns the new index.
A naive solution would be marking the entire source index "deleted"
and adding the duplicates as new. That could bring $GIT_DIR/index back
to the original size. So we try harder and memcmp() between the
original and the duplicate to see if the entry needs updating.

We could avoid memcmp() too, by avoiding duplicating the original
entry in dup_entry(). The performance gain this way is within noise
level and it complicates unpack-trees.c. So memcmp() is the preferred
way to deal with deduplication.

Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>

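To make the memcmp()-based deduplication concrete, here is a minimal,
self-contained sketch (toy types and a toy needs_update() helper, not
Git's real cache_entry or dup_entry()): a duplicate that is
byte-for-byte identical to its original does not need a replacement
record in the split index, while a modified duplicate does.

#include <stdio.h>
#include <string.h>

struct toy_entry {
        unsigned int mode;
        unsigned int size;
        char hash[20];
};

/* non-zero if the duplicate differs from its original and therefore
 * has to be recorded ("replaced") in the split index */
static int needs_update(const struct toy_entry *orig,
                        const struct toy_entry *dup)
{
        return memcmp(orig, dup, sizeof(*orig)) != 0;
}

int main(void)
{
        struct toy_entry orig = { 0100644, 42, "0123456789abcdef012" };
        struct toy_entry same = orig;           /* untouched duplicate */
        struct toy_entry changed = orig;        /* duplicate with new stat data */

        changed.size = 7;
        printf("same needs update: %d\n", needs_update(&orig, &same));          /* 0 */
        printf("changed needs update: %d\n", needs_update(&orig, &changed));    /* 1 */
        return 0;
}
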
struct split_index *init_split_index(struct index_state *istate)
{
        if (!istate->split_index) {
                CALLOC_ARRAY(istate->split_index, 1);
                istate->split_index->refcount = 1;
        }
        return istate->split_index;
}

int read_link_extension(struct index_state *istate,
                        const void *data_, unsigned long sz)
{
        const unsigned char *data = data_;
        struct split_index *si;
        int ret;

        if (sz < the_hash_algo->rawsz)
                return error("corrupt link extension (too short)");
        si = init_split_index(istate);
        oidread(&si->base_oid, data);
        data += the_hash_algo->rawsz;
        sz -= the_hash_algo->rawsz;
        if (!sz)
                return 0;
        si->delete_bitmap = ewah_new();
        ret = ewah_read_mmap(si->delete_bitmap, data, sz);
        if (ret < 0)
                return error("corrupt delete bitmap in link extension");
        data += ret;
        sz -= ret;
        si->replace_bitmap = ewah_new();
        ret = ewah_read_mmap(si->replace_bitmap, data, sz);
        if (ret < 0)
                return error("corrupt replace bitmap in link extension");
        if (ret != sz)
                return error("garbage at the end of link extension");
        return 0;
}

int write_link_extension(struct strbuf *sb,
                         struct index_state *istate)
{
        struct split_index *si = istate->split_index;
        strbuf_add(sb, si->base_oid.hash, the_hash_algo->rawsz);
        if (!si->delete_bitmap && !si->replace_bitmap)
                return 0;
        ewah_serialize_strbuf(si->delete_bitmap, sb);
        ewah_serialize_strbuf(si->replace_bitmap, sb);
        return 0;
}

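Read together, read_link_extension() and write_link_extension() imply
the following payload layout for the "link" extension; this is a sketch
inferred from the code above, not a formal format description:

/*
 * "link" extension payload:
 *
 *   base_oid        the_hash_algo->rawsz bytes; hash naming the shared index
 *   delete_bitmap   EWAH-serialized bitmap (both bitmaps are omitted
 *                   entirely when there is nothing to record)
 *   replace_bitmap  EWAH-serialized bitmap; must end exactly where the
 *                   extension payload ends, otherwise the reader reports
 *                   garbage at the end of the extension
 */
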
static void mark_base_index_entries(struct index_state *base)
{
        int i;
        /*
         * To keep track of the shared entries between
         * istate->base->cache[] and istate->cache[], base entry
         * position is stored in each base entry. All positions start
         * from 1 instead of 0, which is reserved to say "this is a new
         * entry".
         */
        for (i = 0; i < base->cache_nr; i++)
                base->cache[i]->index = i + 1;
}

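As a small illustration of that convention, a helper mapping an entry's
1-based 'index' field back into the shared index could look like the
following (a hypothetical function, not part of this file; the real
code simply indexes si->base->cache[ce->index - 1] directly):

static struct cache_entry *lookup_in_base(struct index_state *base,
                                          unsigned int index)
{
        /* 0 is reserved for "this is a new entry" */
        return index ? base->cache[index - 1] : NULL;
}
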
block alloc: allocate cache entries from mem_pool

When reading large indexes from disk, a portion of the time is
dominated by malloc() calls. This can be mitigated by allocating a
large block of memory and managing it ourselves via memory pools.
This change moves the cache entry allocation to be on top of memory
pools.

Design:

The index_state struct will gain a notion of an associated memory_pool
from which cache_entries will be allocated. When reading in the
index from disk, we have information on the number of entries and
their size, which can guide us in deciding how large our initial
memory allocation should be. When an index is discarded, the
associated memory_pool will be discarded as well - so the lifetime of
a cache_entry is tied to the lifetime of the index_state that it was
allocated for.

In the case of a split index, the following rules are followed. First,
some terminology is defined:

- 'the_index': represents the logical view of the index
- 'split_index': represents the "base" cache entries. Read from the
  split index file.

'the_index' can reference a single split_index, as well as
cache_entries from the split_index. `the_index` will be discarded
before the `split_index` is. This means that when we are allocating
cache_entries in the presence of a split index, we need to allocate
the entries from the `split_index`'s memory pool. This allows us to
follow the pattern that `the_index` can reference cache_entries from
the `split_index`, and that the cache_entries will not be freed while
they are still being referenced.

Managing transient cache_entry structs:

Cache entries are usually allocated for an index, but this is not always
the case. Cache entries are sometimes allocated because this is the
type that the existing checkout_entry function works with. Because of
this, the existing code needs to handle cache entries associated with an
index / memory pool, and those that only exist transiently. Several
strategies were contemplated around how to handle this:

Chosen approach:
An extra field was added to the cache_entry type to track whether the
cache_entry was allocated from a memory pool or not. This is currently
an int field, as there are no more available bits in the existing
ce_flags bit field. If / when more bits are needed, this new field can
be turned into a proper bit field.

Alternatives:
1) Do not include any information about how the cache_entry was
   allocated. Calling code would be responsible for tracking whether the
   cache_entry needed to be freed or not.
   Pro: No extra memory overhead to track this state
   Con: Extra complexity in callers to handle this correctly.
   The extra complexity and burden to not regress this behavior in the
   future was more than we wanted.

2) cache_entry would gain knowledge about which mem_pool allocated it
   Pro: Could (potentially) do extra logic to know when a mem_pool no
        longer had references to any cache_entry
   Con: cache_entry would grow heavier by a pointer, instead of int
   We didn't see a tangible benefit to this approach.

3) Do not add any extra information to a cache_entry, but when freeing a
   cache entry, check if the memory exists in a region managed by existing
   mem_pools.
   Pro: No extra memory overhead to track state
   Con: Extra computation is performed when freeing cache entries
   We decided tracking and iterating over known memory pool regions was
   less desirable than adding an extra field to track this state.

Signed-off-by: Jameson Miller <jamill@microsoft.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>

mem-pool: use more standard initialization and finalization

A typical memory type, such as strbuf, hashmap, or string_list, can be
stored on the stack or embedded within another structure. mem_pool
cannot be, because of how mem_pool_init() and mem_pool_discard() are
written. mem_pool_init() does essentially the following (simplified
for purposes of explanation here):

    void mem_pool_init(struct mem_pool **pool...)
    {
        *pool = xcalloc(1, sizeof(*pool));

It seems weird to require that mem_pools can only be accessed through a
pointer. It also seems slightly dangerous: unlike strbuf_release() or
strbuf_reset() or string_list_clear(), all of which put the data
structure into a state where it can be re-used after the call,
mem_pool_discard(pool) will leave pool pointing at free'd memory.
read-cache (and split-index) are the only current users of mem_pools,
and they haven't fallen into a use-after-free mistake here, but it seems
likely to be problematic for future users, especially since several of
the current callers of mem_pool_init() will only call it when the
mem_pool* is not already allocated (i.e. is NULL).

This type of mechanism also prevents finding synchronization
points where one can free existing memory and then resume more
operations. It would be natural at such points to run something like

    mem_pool_discard(pool...);

and, if necessary,

    mem_pool_init(&pool...);

and then carry on continuing to use the pool. However, this fails badly
if several objects had a copy of the value of pool from before these
commands; in such a case, those objects won't get the updated value of
pool that mem_pool_init() overwrites pool with and they'll all instead
be reading and writing from free'd memory.

Modify mem_pool_init()/mem_pool_discard() to behave more like

    strbuf_init()/strbuf_release()
or
    string_list_init()/string_list_clear()

In particular: (1) make mem_pool_init() just take a mem_pool* and have
it only worry about allocating struct mp_blocks, not the struct mem_pool
itself, (2) make mem_pool_discard() free the memory that the pool was
responsible for, but leave it in a state where it can be used to
allocate more memory afterward (without the need to call mem_pool_init()
again).

Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>

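The two messages above describe the design in prose; the following is a
minimal, self-contained sketch of the same two ideas (bump-pointer
allocation out of large blocks, plus an init/discard pair that leaves
the pool embeddable and reusable, like strbuf_init()/strbuf_release()).
The toy_* names are illustrative only and are not Git's mem-pool API.

#include <stdio.h>
#include <stdlib.h>

struct toy_block {
        struct toy_block *next;
        size_t used, size;
        unsigned char data[];           /* C99 flexible array member */
};

struct toy_pool {
        struct toy_block *head;         /* chain of large blocks */
        size_t block_size;              /* default size of a new block */
};

static void toy_pool_init(struct toy_pool *pool, size_t block_size)
{
        pool->head = NULL;
        pool->block_size = block_size ? block_size : 64 * 1024;
}

static void *toy_pool_alloc(struct toy_pool *pool, size_t len)
{
        struct toy_block *b = pool->head;

        len = (len + 7) & ~(size_t)7;   /* keep returned pointers aligned */
        if (!b || b->size - b->used < len) {
                size_t size = len > pool->block_size ? len : pool->block_size;

                b = malloc(sizeof(*b) + size);
                if (!b)
                        return NULL;
                b->next = pool->head;
                b->used = 0;
                b->size = size;
                pool->head = b;
        }
        b->used += len;
        return b->data + b->used - len;
}

/* Frees every block but leaves the pool itself usable again,
 * mirroring the strbuf_release()-style convention argued for above. */
static void toy_pool_discard(struct toy_pool *pool)
{
        struct toy_block *b = pool->head;

        while (b) {
                struct toy_block *next = b->next;
                free(b);
                b = next;
        }
        pool->head = NULL;
}

int main(void)
{
        struct toy_pool pool;
        int i;

        toy_pool_init(&pool, 4096);
        for (i = 0; i < 1000; i++)
                (void)toy_pool_alloc(&pool, 48);        /* cheap per-entry allocations */
        toy_pool_discard(&pool);                /* everything is freed at once... */
        (void)toy_pool_alloc(&pool, 48);        /* ...and the pool can simply be reused */
        toy_pool_discard(&pool);
        printf("allocated and discarded %d toy entries\n", i);
        return 0;
}
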
void move_cache_to_base_index(struct index_state *istate)
{
        struct split_index *si = istate->split_index;
        int i;

        /*
         * If there was a previous base index, then transfer ownership of allocated
         * entries to the parent index.
         */
        if (si->base &&
                si->base->ce_mem_pool) {

                if (!istate->ce_mem_pool) {
                        istate->ce_mem_pool = xmalloc(sizeof(struct mem_pool));
                        mem_pool_init(istate->ce_mem_pool, 0);
                }

                mem_pool_combine(istate->ce_mem_pool, istate->split_index->base->ce_mem_pool);
        }

        CALLOC_ARRAY(si->base, 1);
        si->base->version = istate->version;
        /* zero timestamp disables racy test in ce_write_index() */
        si->base->timestamp = istate->timestamp;
        ALLOC_GROW(si->base->cache, istate->cache_nr, si->base->cache_alloc);
        si->base->cache_nr = istate->cache_nr;

        /*
         * The mem_pool needs to move with the allocated entries.
         */
        si->base->ce_mem_pool = istate->ce_mem_pool;
        istate->ce_mem_pool = NULL;

        COPY_ARRAY(si->base->cache, istate->cache, istate->cache_nr);
        mark_base_index_entries(si->base);
        for (i = 0; i < si->base->cache_nr; i++)
                si->base->cache[i]->ce_flags &= ~CE_UPDATE_IN_BASE;
}

split-index: count the number of deleted entries

'struct split_index' contains the field 'nr_deletions', whose name
with the 'nr_' prefix suggests that it contains the number of deleted
cache entries. However, barring its initialization to 0, this field
is only ever set to 1, indicating that there is at least one deleted
entry, but not the number of deleted entries. Luckily, this doesn't
cause any issues (other than confusing the reader, that is), because
the only place reading this field uses it in the same sense, i.e.:
'if (si->nr_deletions)'.

To avoid confusion, we could either rename this field to something
like 'has_deletions' to make its name match its role, or make it a
counter of deleted cache entries to match its name.

Let's make it a counter, to keep it in sync with the related field
'nr_replacements', which does contain the number of replaced cache
entries. This will also give developers debugging the split index
code easy access to the number of deleted cache entries.

Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>

static void mark_entry_for_delete(size_t pos, void *data)
{
        struct index_state *istate = data;
        if (pos >= istate->cache_nr)
                die("position for delete %d exceeds base index size %d",
                    (int)pos, istate->cache_nr);
        istate->cache[pos]->ce_flags |= CE_REMOVE;
        istate->split_index->nr_deletions++;
}

static void replace_entry(size_t pos, void *data)
{
        struct index_state *istate = data;
        struct split_index *si = istate->split_index;
        struct cache_entry *dst, *src;

        if (pos >= istate->cache_nr)
                die("position for replacement %d exceeds base index size %d",
                    (int)pos, istate->cache_nr);
        if (si->nr_replacements >= si->saved_cache_nr)
                die("too many replacements (%d vs %d)",
                    si->nr_replacements, si->saved_cache_nr);
        dst = istate->cache[pos];
        if (dst->ce_flags & CE_REMOVE)
                die("entry %d is marked as both replaced and deleted",
                    (int)pos);
        src = si->saved_cache[si->nr_replacements];
        if (ce_namelen(src))
                die("corrupt link extension, entry %d should have "
                    "zero length name", (int)pos);
        src->index = pos + 1;
        src->ce_flags |= CE_UPDATE_IN_BASE;
        src->ce_namelen = dst->ce_namelen;
        copy_cache_entry(dst, src);
        discard_cache_entry(src);
        si->nr_replacements++;
}

void merge_base_index(struct index_state *istate)
{
        struct split_index *si = istate->split_index;
        unsigned int i;

        mark_base_index_entries(si->base);

        si->saved_cache = istate->cache;
        si->saved_cache_nr = istate->cache_nr;
        istate->cache_nr = si->base->cache_nr;
        istate->cache = NULL;
        istate->cache_alloc = 0;
        ALLOC_GROW(istate->cache, istate->cache_nr, istate->cache_alloc);
        COPY_ARRAY(istate->cache, si->base->cache, istate->cache_nr);

        si->nr_deletions = 0;
        si->nr_replacements = 0;
        ewah_each_bit(si->replace_bitmap, replace_entry, istate);
        ewah_each_bit(si->delete_bitmap, mark_entry_for_delete, istate);
        if (si->nr_deletions)
                remove_marked_cache_entries(istate, 0);

        for (i = si->nr_replacements; i < si->saved_cache_nr; i++) {
                if (!ce_namelen(si->saved_cache[i]))
                        die("corrupt link extension, entry %d should "
                            "have non-zero length name", i);
                add_index_entry(istate, si->saved_cache[i],
                                ADD_CACHE_OK_TO_ADD |
                                ADD_CACHE_KEEP_CACHE_TREE |
                                /*
                                 * we may have to replay what
                                 * merge-recursive.c:update_stages()
                                 * does, which has this flag on
                                 */
                                ADD_CACHE_SKIP_DFCHECK);
                si->saved_cache[i] = NULL;
        }

        ewah_free(si->delete_bitmap);
        ewah_free(si->replace_bitmap);
        FREE_AND_NULL(si->saved_cache);
        si->delete_bitmap = NULL;
        si->replace_bitmap = NULL;
        si->saved_cache_nr = 0;
}

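To see the merge in one glance, here is a self-contained toy model of
what merge_base_index() computes (plain strings instead of cache
entries, plain int arrays instead of EWAH bitmaps; illustrative only,
not Git code): replaced base slots take the content supplied by the
split index, deleted slots disappear, and the remaining split entries
are appended.

#include <stdio.h>

int main(void)
{
        const char *base[] = { "a.txt", "b.txt", "c.txt" };     /* shared index */
        int delete_bit[]   = { 0, 1, 0 };       /* "b.txt" was deleted */
        int replace_bit[]  = { 0, 0, 1 };       /* "c.txt" was updated */
        const char *replacements[] = { "c.txt (new content)" };
        const char *additions[]    = { "d.txt" };       /* brand new entries */
        int nbase = 3, nadd = 1, i, r = 0;

        printf("merged index:\n");
        for (i = 0; i < nbase; i++) {
                const char *entry = base[i];

                if (replace_bit[i])
                        entry = replacements[r++];      /* stored without a path; reuses the base name */
                if (delete_bit[i])
                        continue;
                printf("  %s\n", entry);
        }
        for (i = 0; i < nadd; i++)
                printf("  %s\n", additions[i]);         /* stored with their paths */
        return 0;
}
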
split-index: don't compare cached data of entries already marked for split index

When unpack_trees() constructs a new index, it copies cache entries
from the original index [1]. prepare_to_write_split_index() has to
deal with this, and it has a dedicated code path for copied entries
that are present in the shared index, where it compares the cached
data in the corresponding copied and original entries. If the cached
data matches, then they are considered the same; if it differs, then
the copied entry will be marked for inclusion as a replacement entry
in the split index that is just about to be written, by setting the
CE_UPDATE_IN_BASE flag.

However, a cache entry already has its CE_UPDATE_IN_BASE flag set upon
reading the split index, if the entry already has a replacement entry
there, or upon refreshing the cached stat data, if the corresponding
file was modified. The state of this flag is then preserved when
unpack_trees() copies a cache entry from the shared index.

So modify prepare_to_write_split_index() to check the copied cache
entries' CE_UPDATE_IN_BASE flag first, and skip the thorough
comparison of cached data if the flag is already set. Those couple of
lines comparing the cached data would then have too many levels of
indentation, so extract them into a helper function.

Note that comparing the cached data in copied and original entries in
the shared index might actually be entirely unnecessary. In theory
all code paths refreshing the cached stat data of an entry in the
shared index should set the CE_UPDATE_IN_BASE flag in that entry, and
unpack_trees() should preserve this flag when copying cache entries.
This means that the cached data is only ever changed if the
CE_UPDATE_IN_BASE flag is set as well. Our test suite seems to
confirm this: instrumenting the conditions in question and running the
test suite repeatedly with 'GIT_TEST_SPLIT_INDEX=yes' showed that the
cached data in a copied entry differs from the data in the shared
entry only if its CE_UPDATE_IN_BASE flag is indeed set.

In practice, however, our test suite doesn't have 100% coverage,
GIT_TEST_SPLIT_INDEX is inherently random, and I certainly can't claim
to possess complete understanding of what goes on in unpack_trees()...
Therefore I kept the comparison of the cached data when
CE_UPDATE_IN_BASE is not set, just in case an unnoticed or future
code path were to accidentally miss setting this flag upon refreshing
the cached stat data, or unpack_trees() were to drop this flag while
copying a cache entry.

[1] Note that when unpack_trees() constructs the new index and decides
    that a cache entry should now refer to different content than what
    was recorded in the original index (e.g. 'git read-tree -m
    HEAD^'), then that can't really be considered a copy of the
    original, but rather the creation of a new entry. Notably and
    pertinent to the split index feature, such a new entry doesn't
    have a reference to the original's shared index entry anymore,
    i.e. its 'index' field is set to 0. Consequently, such an entry
    is treated by prepare_to_write_split_index() as an entry not
    present in the shared index and it will be added to the new split
    index, while the original entry will be marked as deleted, and
    neither the above discussion nor the changes in this patch apply
    to them.

Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>

/*
 * Compare most of the fields in two cache entries, i.e. all except the
 * hashmap_entry and the name.
 */
static int compare_ce_content(struct cache_entry *a, struct cache_entry *b)
{
        const unsigned int ondisk_flags = CE_STAGEMASK | CE_VALID |
                                          CE_EXTENDED_FLAGS;
        unsigned int ce_flags = a->ce_flags;
        unsigned int base_flags = b->ce_flags;
        int ret;

        /* only on-disk flags matter */
        a->ce_flags &= ondisk_flags;
        b->ce_flags &= ondisk_flags;
        ret = memcmp(&a->ce_stat_data, &b->ce_stat_data,
                     offsetof(struct cache_entry, name) -
                     offsetof(struct cache_entry, oid)) ||
                !oideq(&a->oid, &b->oid);
        a->ce_flags = ce_flags;
        b->ce_flags = base_flags;

        return ret;
}

2014-06-13 12:19:36 +00:00
|
|
|
void prepare_to_write_split_index(struct index_state *istate)
|
|
|
|
{
|
|
|
|
struct split_index *si = init_split_index(istate);
|
split-index: the writing part
prepare_to_write_split_index() does the major work, classifying
deleted, updated and added entries. write_link_extension() then just
writes it down.
An observation is, deleting an entry, then adding it back is recorded
as "entry X is deleted, entry X is added", not "entry X is replaced".
This is simpler, with small overhead: a replaced entry is stored
without its path, a new entry is store with its path.
A note about unpack_trees() and the deduplication code inside
prepare_to_write_split_index(). Usually tracking updated/removed
entries via read-cache API is enough. unpack_trees() manipulates the
index in a different way: it throws the entire source index out,
builds up a new one, copying/duplicating entries (using dup_entry)
from the source index over if necessary, then returns the new index.
A naive solution would be marking the entire source index "deleted"
and add their duplicates as new. That could bring $GIT_DIR/index back
to the original size. So we try harder and memcmp() between the
original and the duplicate to see if it needs updating.
We could avoid memcmp() too, by avoiding duplicating the original
entry in dup_entry(). The performance gain this way is within noise
level and it complicates unpack-trees.c. So memcmp() is the preferred
way to deal with deduplication.
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-13 12:19:40 +00:00
|
|
|
struct cache_entry **entries = NULL, *ce;
|
|
|
|
int i, nr_entries = 0, nr_alloc = 0;
|
|
|
|
|
|
|
|
si->delete_bitmap = ewah_new();
|
|
|
|
si->replace_bitmap = ewah_new();
|
|
|
|
|
|
|
|
if (si->base) {
|
|
|
|
/* Go through istate->cache[] and mark CE_MATCHED to
|
|
|
|
* entry with positive index. We'll go through
|
|
|
|
* base->cache[] later to delete all entries in base
|
2016-10-23 09:26:30 +00:00
|
|
|
* that are not marked with either CE_MATCHED or
|
split-index: the writing part
prepare_to_write_split_index() does the major work, classifying
deleted, updated and added entries. write_link_extension() then just
writes it down.
An observation is, deleting an entry, then adding it back is recorded
as "entry X is deleted, entry X is added", not "entry X is replaced".
This is simpler, with small overhead: a replaced entry is stored
without its path, a new entry is store with its path.
A note about unpack_trees() and the deduplication code inside
prepare_to_write_split_index(). Usually tracking updated/removed
entries via read-cache API is enough. unpack_trees() manipulates the
index in a different way: it throws the entire source index out,
builds up a new one, copying/duplicating entries (using dup_entry)
from the source index over if necessary, then returns the new index.
A naive solution would be marking the entire source index "deleted"
and add their duplicates as new. That could bring $GIT_DIR/index back
to the original size. So we try harder and memcmp() between the
original and the duplicate to see if it needs updating.
We could avoid memcmp() too, by avoiding duplicating the original
entry in dup_entry(). The performance gain this way is within noise
level and it complicates unpack-trees.c. So memcmp() is the preferred
way to deal with deduplication.
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-13 12:19:40 +00:00
|
|
|
* CE_UPDATE_IN_BASE. If istate->cache[i] is a
|
|
|
|
* duplicate, deduplicate it.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < istate->cache_nr; i++) {
|
|
|
|
struct cache_entry *base;
|
|
|
|
ce = istate->cache[i];
|
split-index: don't compare cached data of entries already marked for split index
When unpack_trees() constructs a new index, it copies cache entries
from the original index [1]. prepare_to_write_split_index() has to
deal with this, and it has a dedicated code path for copied entries
that are present in the shared index, where it compares the cached
data in the corresponding copied and original entries. If the cached
data matches, then they are considered the same; if it differs, then
the copied entry will be marked for inclusion as a replacement entry
in the just about to be written split index by setting the
CE_UPDATE_IN_BASE flag.
However, a cache entry already has its CE_UPDATE_IN_BASE flag set upon
reading the split index, if the entry already has a replacement entry
there, or upon refreshing the cached stat data, if the corresponding
file was modified. The state of this flag is then preserved when
unpack_trees() copies a cache entry from the shared index.
So modify prepare_to_write_split_index() to check the copied cache
entries' CE_UPDATE_IN_BASE flag first, and skip the thorough
comparison of cached data if the flag is already set. Those couple of
lines comparing the cached data would then have too many levels of
indentation, so extract them into a helper function.
Note that comparing the cached data in copied and original entries in
the shared index might actually be entirely unnecessary. In theory
all code paths refreshing the cached stat data of an entry in the
shared index should set the CE_UPDATE_IN_BASE flag in that entry, and
unpack_trees() should preserve this flag when copying cache entries.
This means that the cached data is only ever changed if the
CE_UPDATE_IN_BASE flag is set as well. Our test suite seems to
confirm this: instrumenting the conditions in question and running the
test suite repeatedly with 'GIT_TEST_SPLIT_INDEX=yes' showed that the
cached data in a copied entry differs from the data in the shared
entry only if its CE_UPDATE_IN_BASE flag is indeed set.
In practice, however, our test suite doesn't have 100% coverage,
GIT_TEST_SPLIT_INDEX is inherently random, and I certainly can't claim
to possess complete understanding of what goes on in unpack_trees()...
Therefore I kept the comparison of the cached data when
CE_UPDATE_IN_BASE is not set, just in case that an unnoticed or future
code path were to accidentally miss setting this flag upon refreshing
the cached stat data or unpack_trees() were to drop this flag while
copying a cache entry.
[1] Note that when unpack_trees() constructs the new index and decides
that a cache entry should now refer to different content than what
was recorded in the original index (e.g. 'git read-tree -m
HEAD^'), then that can't really be considered a copy of the
original, but rather the creation of a new entry. Notably and
pertinent to the split index feature, such a new entry doesn't
have a reference to the original's shared index entry anymore,
i.e. its 'index' field is set to 0. Consequently, such an entry
is treated by prepare_to_write_split_index() as an entry not
present in the shared index and it will be added to the new split
index, while the original entry will be marked as deleted, and
neither the above discussion nor the changes in this patch apply
to them.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-10-11 09:43:08 +00:00
|
|
|
if (!ce->index) {
|
|
|
|
/*
|
|
|
|
* During simple update index operations this
|
|
|
|
* is a cache entry that is not present in
|
|
|
|
* the shared index. It will be added to the
|
|
|
|
* split index.
|
|
|
|
*
|
|
|
|
* However, it might also represent a file
|
|
|
|
* that already has a cache entry in the
|
|
|
|
* shared index, but a new index has just
|
|
|
|
* been constructed by unpack_trees(), and
|
|
|
|
* this entry now refers to different content
|
|
|
|
* than what was recorded in the original
|
|
|
|
* index, e.g. during 'read-tree -m HEAD^' or
|
|
|
|
* 'checkout HEAD^'. In this case the
|
|
|
|
* original entry in the shared index will be
|
|
|
|
* marked as deleted, and this entry will be
|
|
|
|
* added to the split index.
|
|
|
|
*/
|
split-index: the writing part
prepare_to_write_split_index() does the major work, classifying
deleted, updated and added entries. write_link_extension() then just
writes it down.
An observation is, deleting an entry, then adding it back is recorded
as "entry X is deleted, entry X is added", not "entry X is replaced".
This is simpler, with small overhead: a replaced entry is stored
without its path, a new entry is store with its path.
A note about unpack_trees() and the deduplication code inside
prepare_to_write_split_index(). Usually tracking updated/removed
entries via read-cache API is enough. unpack_trees() manipulates the
index in a different way: it throws the entire source index out,
builds up a new one, copying/duplicating entries (using dup_entry)
from the source index over if necessary, then returns the new index.
A naive solution would be marking the entire source index "deleted"
and add their duplicates as new. That could bring $GIT_DIR/index back
to the original size. So we try harder and memcmp() between the
original and the duplicate to see if it needs updating.
We could avoid memcmp() too, by avoiding duplicating the original
entry in dup_entry(). The performance gain this way is within noise
level and it complicates unpack-trees.c. So memcmp() is the preferred
way to deal with deduplication.
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-13 12:19:40 +00:00
|
|
|
continue;
|
split-index: don't compare cached data of entries already marked for split index
When unpack_trees() constructs a new index, it copies cache entries
from the original index [1]. prepare_to_write_split_index() has to
deal with this, and it has a dedicated code path for copied entries
that are present in the shared index, where it compares the cached
data in the corresponding copied and original entries. If the cached
data matches, then they are considered the same; if it differs, then
the copied entry will be marked for inclusion as a replacement entry
in the just about to be written split index by setting the
CE_UPDATE_IN_BASE flag.
However, a cache entry already has its CE_UPDATE_IN_BASE flag set upon
reading the split index, if the entry already has a replacement entry
there, or upon refreshing the cached stat data, if the corresponding
file was modified. The state of this flag is then preserved when
unpack_trees() copies a cache entry from the shared index.
So modify prepare_to_write_split_index() to check the copied cache
entries' CE_UPDATE_IN_BASE flag first, and skip the thorough
comparison of cached data if the flag is already set. Those couple of
lines comparing the cached data would then have too many levels of
indentation, so extract them into a helper function.
Note that comparing the cached data in copied and original entries in
the shared index might actually be entirely unnecessary. In theory
all code paths refreshing the cached stat data of an entry in the
shared index should set the CE_UPDATE_IN_BASE flag in that entry, and
unpack_trees() should preserve this flag when copying cache entries.
This means that the cached data is only ever changed if the
CE_UPDATE_IN_BASE flag is set as well. Our test suite seems to
confirm this: instrumenting the conditions in question and running the
test suite repeatedly with 'GIT_TEST_SPLIT_INDEX=yes' showed that the
cached data in a copied entry differs from the data in the shared
entry only if its CE_UPDATE_IN_BASE flag is indeed set.
In practice, however, our test suite doesn't have 100% coverage,
GIT_TEST_SPLIT_INDEX is inherently random, and I certainly can't claim
to possess complete understanding of what goes on in unpack_trees()...
Therefore I kept the comparison of the cached data when
CE_UPDATE_IN_BASE is not set, just in case that an unnoticed or future
code path were to accidentally miss setting this flag upon refreshing
the cached stat data or unpack_trees() were to drop this flag while
copying a cache entry.
[1] Note that when unpack_trees() constructs the new index and decides
that a cache entry should now refer to different content than what
was recorded in the original index (e.g. 'git read-tree -m
HEAD^'), then that can't really be considered a copy of the
original, but rather the creation of a new entry. Notably and
pertinent to the split index feature, such a new entry doesn't
have a reference to the original's shared index entry anymore,
i.e. its 'index' field is set to 0. Consequently, such an entry
is treated by prepare_to_write_split_index() as an entry not
present in the shared index and it will be added to the new split
index, while the original entry will be marked as deleted, and
neither the above discussion nor the changes in this patch apply
to them.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-10-11 09:43:08 +00:00
|
|
|
}
|
split-index: the writing part
prepare_to_write_split_index() does the major work, classifying
deleted, updated and added entries. write_link_extension() then just
writes it down.
An observation is, deleting an entry, then adding it back is recorded
as "entry X is deleted, entry X is added", not "entry X is replaced".
This is simpler, with small overhead: a replaced entry is stored
without its path, a new entry is store with its path.
A note about unpack_trees() and the deduplication code inside
prepare_to_write_split_index(). Usually tracking updated/removed
entries via read-cache API is enough. unpack_trees() manipulates the
index in a different way: it throws the entire source index out,
builds up a new one, copying/duplicating entries (using dup_entry)
from the source index over if necessary, then returns the new index.
A naive solution would be marking the entire source index "deleted"
and add their duplicates as new. That could bring $GIT_DIR/index back
to the original size. So we try harder and memcmp() between the
original and the duplicate to see if it needs updating.
We could avoid memcmp() too, by avoiding duplicating the original
entry in dup_entry(). The performance gain this way is within noise
level and it complicates unpack-trees.c. So memcmp() is the preferred
way to deal with deduplication.
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-13 12:19:40 +00:00
|
|
|
if (ce->index > si->base->cache_nr) {
|
split-index: BUG() when cache entry refers to non-existing shared entry
When the split index feature is in use, then a cache entry is:
- either only present in the split index, in which case its 'index'
field must be 0,
- or it should refer to an existing entry in the shared index, i.e.
the 'index' field can't be greater than the size of the shared
index.
If a cache entry were to refer to a non-existing entry in the shared
index, then that's a sign of something being wrong in the index state,
either as a result of a bug in dealing with the split/shared index
entries, or perhaps a (potentially unrelated) memory corruption issue.
prepare_to_write_split_index() already has a condition to catch cache
entries with such bogus 'index' field, but instead of calling BUG() it
just sets cache entry's 'index = 0', and the entry will then be
written to the new split index.
Don't write a new index file from bogus index state, and call BUG()
upon encountering an cache entry referring to a non-existing shared
index entry.
Running the test suite repeatedly with 'GIT_TEST_SPLIT_INDEX=yes'
doesn't trigger this condition.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-10-11 09:53:57 +00:00
|
|
|
BUG("ce refers to a shared ce at %d, which is beyond the shared index size %d",
|
|
|
|
ce->index, si->base->cache_nr);
|
split-index: the writing part
prepare_to_write_split_index() does the major work, classifying
deleted, updated and added entries. write_link_extension() then just
writes it down.
An observation is, deleting an entry, then adding it back is recorded
as "entry X is deleted, entry X is added", not "entry X is replaced".
This is simpler, with small overhead: a replaced entry is stored
without its path, a new entry is store with its path.
A note about unpack_trees() and the deduplication code inside
prepare_to_write_split_index(). Usually tracking updated/removed
entries via read-cache API is enough. unpack_trees() manipulates the
index in a different way: it throws the entire source index out,
builds up a new one, copying/duplicating entries (using dup_entry)
from the source index over if necessary, then returns the new index.
A naive solution would be marking the entire source index "deleted"
and add their duplicates as new. That could bring $GIT_DIR/index back
to the original size. So we try harder and memcmp() between the
original and the duplicate to see if it needs updating.
We could avoid memcmp() too, by avoiding duplicating the original
entry in dup_entry(). The performance gain this way is within noise
level and it complicates unpack-trees.c. So memcmp() is the preferred
way to deal with deduplication.
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2014-06-13 12:19:40 +00:00
|
|
|
}
|
|
|
|
ce->ce_flags |= CE_MATCHED; /* or "shared" */
|
|
|
|
base = si->base->cache[ce->index - 1];
|
split-index: smudge and add racily clean cache entries to split index
Ever since the split index feature was introduced [1], refreshing a
split index is prone to a variant of the classic racy git problem.
Consider the following sequence of commands updating the split index
when the shared index contains a racily clean cache entry, i.e. an
entry whose cached stat data matches with the corresponding file in
the worktree and the cached mtime matches that of the index:
echo "cached content" >file
git update-index --split-index --add file
echo "dirty worktree" >file # size stays the same!
# ... wait ...
git update-index --add other-file
Normally, when a non-split index is updated, then do_write_index()
(the function responsible for writing all kinds of indexes, "regular",
split, and shared) recognizes racily clean cache entries, and writes
them with smudged stat data, i.e. with file size set to 0. When
subsequent git commands read the index, they will notice that the
smudged stat data doesn't match with the file in the worktree, and
then go on to check the file's content and notice its dirtiness.
In the above example, however, in the second 'git update-index'
prepare_to_write_split_index() decides which cache entries stored only
in the shared index should be replaced in the new split index. Alas,
this function never looks out for racily clean cache entries, and
since the file's stat data in the worktree hasn't changed since the
shared index was written, it won't be replaced in the new split index.
Consequently, do_write_index() doesn't even get this racily clean
cache entry, and can't smudge its stat data. Subsequent git commands
will then see that the index has more recent mtime than the file and
that the (not smudged) cached stat data still matches with the file in
the worktree, and, ultimately, will erroneously consider the file
clean.
Modify prepare_to_write_split_index() to recognize racily clean cache
entries, and mark them to be added to the split index. Note that
there are two places where it should check raciness: first those cache
entries that are only stored in the shared index, and then those that
have been copied by unpack_trees() from the shared index while it
constructed a new index. This way do_write_index() will get these
racily clean cache entries as well, and will then write them with
smudged stat data to the new split index.
This change makes all tests in 't1701-racy-split-index.sh' pass, so
flip the two 'test_expect_failure' tests to success. Also add the '#'
(as in nr. of trial) to those tests' description that were omitted
when the tests expected failure.
Note that after this change if the index is split when it contains a
racily clean cache entry, then a smudged cache entry will be written
both to the new shared and to the new split indexes. This doesn't
affect regular git commands: as far as they are concerned this is just
an entry in the split index replacing an outdated entry in the shared
index. It did affect a few tests in 't1700-split-index.sh', though,
because they actually check which entries are stored in the split
index; a previous patch in this series has already made the necessary
adjustments in 't1700'. And racily clean cache entries and index
splitting are rare enough to not worry about the resulting duplicated
smudged cache entries, and the additional complexity required to
prevent them is not worth it.
Several tests failed occasionally when the test suite was run with
'GIT_TEST_SPLIT_INDEX=yes'. Here are those that I managed to trace
back to this racy split index problem, starting with those failing
more frequently, with a link to a failing Travis CI build job for
each. The highlighted line [2] shows when the racy file was written,
which is not always in the failing test but in a preceding setup
test.
t3903-stash.sh:
https://travis-ci.org/git/git/jobs/385542084#L5858
t4024-diff-optimize-common.sh:
https://travis-ci.org/git/git/jobs/386531969#L3174
t4015-diff-whitespace.sh:
https://travis-ci.org/git/git/jobs/360797600#L8215
t2200-add-update.sh:
https://travis-ci.org/git/git/jobs/382543426#L3051
t0090-cache-tree.sh:
https://travis-ci.org/git/git/jobs/416583010#L3679
There might be others, e.g. perhaps 't1000-read-tree-m-3way.sh' and
others using 'lib-read-tree-m-3way.sh', but I couldn't confirm yet.
[1] In the branch leading to the merge commit v2.1.0-rc0~45 (Merge
branch 'nd/split-index', 2014-07-16).
[2] Note that those highlighted lines are in the 'after failure' fold,
and your browser might unhelpfully fold it up before you could
take a good look.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-10-11 09:43:09 +00:00
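To make the failure mode described above concrete, here is a minimal,
stand-alone sketch of the racily-clean check and the stat-data
smudging that do_write_index() needs to perform. It does not use
Git's real data structures; the struct and helper names are made up
for illustration only. The bug fixed here was that such entries were
never handed to do_write_index() in the first place, so they could
never be smudged.

/*
 * Stand-alone sketch of the racily-clean problem described above.
 * The struct and helper names are illustrative, not Git's real API.
 */
#include <stdio.h>
#include <time.h>

struct fake_cache_entry {
	time_t cached_mtime;	/* file mtime recorded in the index */
	long cached_size;	/* file size recorded in the index */
};

/*
 * An entry is racily clean when the file may have been modified in the
 * same second the index was written: the cached stat data still matches
 * the file, so only the timestamp comparison can flag it as suspect.
 */
static int is_racily_clean(const struct fake_cache_entry *ce, time_t index_mtime)
{
	return ce->cached_mtime >= index_mtime;
}

/* Smudging writes the entry with size 0, forcing later readers to re-check content. */
static void smudge(struct fake_cache_entry *ce)
{
	ce->cached_size = 0;
}

int main(void)
{
	time_t index_mtime = time(NULL);
	struct fake_cache_entry ce = { index_mtime, 42 };

	if (is_racily_clean(&ce, index_mtime))
		smudge(&ce);	/* what do_write_index() must get a chance to do */
	printf("cached size as written: %ld\n", ce.cached_size);
	return 0;
}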
|
|
|
if (ce == base) {
|
|
|
|
/* The entry is present in the shared index. */
|
|
|
|
if (ce->ce_flags & CE_UPDATE_IN_BASE) {
|
|
|
|
/*
|
|
|
|
* Already marked for inclusion in
|
|
|
|
* the split index, either because
|
|
|
|
* the corresponding file was
|
|
|
|
* modified and the cached stat data
|
|
|
|
* was refreshed, or because there
|
|
|
|
* is already a replacement entry in
|
|
|
|
* the split index.
|
|
|
|
* Nothing more to do here.
|
|
|
|
*/
|
|
|
|
} else if (!ce_uptodate(ce) &&
|
|
|
|
is_racy_timestamp(istate, ce)) {
|
|
|
|
/*
|
|
|
|
* A racily clean cache entry stored
|
|
|
|
* only in the shared index: it must
|
|
|
|
* be added to the split index, so
|
|
|
|
* the subsequent do_write_index()
|
|
|
|
* can smudge its stat data.
|
|
|
|
*/
|
|
|
|
ce->ce_flags |= CE_UPDATE_IN_BASE;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* The entry is only present in the
|
|
|
|
* shared index and it was not
|
|
|
|
* refreshed.
|
|
|
|
* Just leave it there.
|
|
|
|
*/
|
|
|
|
}
|
2014-06-13 12:19:40 +00:00
|
|
|
continue;
|
2018-10-11 09:43:09 +00:00
|
|
|
}
|
2014-06-13 12:19:40 +00:00
|
|
|
if (ce->ce_namelen != base->ce_namelen ||
|
|
|
|
strcmp(ce->name, base->name)) {
|
|
|
|
ce->index = 0;
|
|
|
|
continue;
|
|
|
|
}
|
split-index: don't compare cached data of entries already marked for split index
When unpack_trees() constructs a new index, it copies cache entries
from the original index [1]. prepare_to_write_split_index() has to
deal with this, and it has a dedicated code path for copied entries
that are present in the shared index, where it compares the cached
data in the corresponding copied and original entries. If the cached
data matches, then they are considered the same; if it differs, then
the copied entry will be marked for inclusion as a replacement entry
in the just about to be written split index by setting the
CE_UPDATE_IN_BASE flag.
However, a cache entry already has its CE_UPDATE_IN_BASE flag set upon
reading the split index, if the entry already has a replacement entry
there, or upon refreshing the cached stat data, if the corresponding
file was modified. The state of this flag is then preserved when
unpack_trees() copies a cache entry from the shared index.
So modify prepare_to_write_split_index() to check the copied cache
entries' CE_UPDATE_IN_BASE flag first, and skip the thorough
comparison of cached data if the flag is already set. Those couple of
lines comparing the cached data would then have too many levels of
indentation, so extract them into a helper function.
Note that comparing the cached data in copied and original entries in
the shared index might actually be entirely unnecessary. In theory
all code paths refreshing the cached stat data of an entry in the
shared index should set the CE_UPDATE_IN_BASE flag in that entry, and
unpack_trees() should preserve this flag when copying cache entries.
This means that the cached data is only ever changed if the
CE_UPDATE_IN_BASE flag is set as well. Our test suite seems to
confirm this: instrumenting the conditions in question and running the
test suite repeatedly with 'GIT_TEST_SPLIT_INDEX=yes' showed that the
cached data in a copied entry differs from the data in the shared
entry only if its CE_UPDATE_IN_BASE flag is indeed set.
In practice, however, our test suite doesn't have 100% coverage,
GIT_TEST_SPLIT_INDEX is inherently random, and I certainly can't claim
to possess complete understanding of what goes on in unpack_trees()...
Therefore I kept the comparison of the cached data when
CE_UPDATE_IN_BASE is not set, just in case an unnoticed or future
code path were to accidentally miss setting this flag upon refreshing
the cached stat data or unpack_trees() were to drop this flag while
copying a cache entry.
[1] Note that when unpack_trees() constructs the new index and decides
that a cache entry should now refer to different content than what
was recorded in the original index (e.g. 'git read-tree -m
HEAD^'), then that can't really be considered a copy of the
original, but rather the creation of a new entry. Notably and
pertinent to the split index feature, such a new entry doesn't
have a reference to the original's shared index entry anymore,
i.e. its 'index' field is set to 0. Consequently, such an entry
is treated by prepare_to_write_split_index() as an entry not
present in the shared index and it will be added to the new split
index, while the original entry will be marked as deleted, and
neither the above discussion nor the changes in this patch apply
to them.
Signed-off-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-10-11 09:43:08 +00:00
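A rough sketch of the extracted comparison helper described above.
Git's real compare_ce_content() operates on struct cache_entry; the
simplified struct and field set below are assumptions made purely for
illustration. The point is that only the cached data is compared, so
transient in-memory flags alone never make a copied entry look
different from the shared one.

#include <string.h>

struct fake_entry {
	unsigned int flags;	/* in-memory flags, e.g. CE_MATCHED; ignored here */
	unsigned int mode;	/* cached data starts here */
	long size;
	long mtime;
	unsigned char oid[20];
};

/* Return non-zero when the cached data of the two entries differs. */
static int cached_data_differs(const struct fake_entry *a, const struct fake_entry *b)
{
	return a->mode != b->mode ||
	       a->size != b->size ||
	       a->mtime != b->mtime ||
	       memcmp(a->oid, b->oid, sizeof(a->oid));
}

int main(void)
{
	struct fake_entry shared = { 0, 0100644, 10, 1, { 0 } };
	struct fake_entry copy = shared;

	copy.flags |= 1;	/* differing flags alone do not mark the copy for the split index */
	return cached_data_differs(&copy, &shared);	/* 0: same cached data */
}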
|
|
|
/*
|
|
|
|
* This is the copy of a cache entry that is present
|
|
|
|
* in the shared index, created by unpack_trees()
|
|
|
|
* while it constructed a new index.
|
|
|
|
*/
|
|
|
|
if (ce->ce_flags & CE_UPDATE_IN_BASE) {
|
|
|
|
/*
|
|
|
|
* Already marked for inclusion in the split
|
|
|
|
* index, either because the corresponding
|
|
|
|
* file was modified and the cached stat data
|
|
|
|
* was refreshed, or because the original
|
|
|
|
* entry already had a replacement entry in
|
|
|
|
* the split index.
|
|
|
|
* Nothing to do.
|
|
|
|
*/
|
2018-10-11 09:43:09 +00:00
|
|
|
} else if (!ce_uptodate(ce) &&
|
|
|
|
is_racy_timestamp(istate, ce)) {
|
|
|
|
/*
|
|
|
|
* A copy of a racily clean cache entry from
|
|
|
|
* the shared index. It must be added to
|
|
|
|
* the split index, so the subsequent
|
|
|
|
* do_write_index() can smudge its stat data.
|
|
|
|
*/
|
|
|
|
ce->ce_flags |= CE_UPDATE_IN_BASE;
|
2018-10-11 09:43:08 +00:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Thoroughly compare the cached data to see
|
|
|
|
* whether it should be marked for inclusion
|
|
|
|
* in the split index.
|
|
|
|
*
|
|
|
|
* This comparison might be unnecessary, as
|
|
|
|
* code paths modifying the cached data do
|
|
|
|
* set CE_UPDATE_IN_BASE as well.
|
|
|
|
*/
|
|
|
|
if (compare_ce_content(ce, base))
|
|
|
|
ce->ce_flags |= CE_UPDATE_IN_BASE;
|
|
|
|
}
|
2018-07-02 19:49:31 +00:00
|
|
|
discard_cache_entry(base);
|
2014-06-13 12:19:40 +00:00
|
|
|
si->base->cache[ce->index - 1] = ce;
|
|
|
|
}
|
|
|
|
for (i = 0; i < si->base->cache_nr; i++) {
|
|
|
|
ce = si->base->cache[i];
|
|
|
|
if ((ce->ce_flags & CE_REMOVE) ||
|
|
|
|
!(ce->ce_flags & CE_MATCHED))
|
|
|
|
ewah_set(si->delete_bitmap, i);
|
|
|
|
else if (ce->ce_flags & CE_UPDATE_IN_BASE) {
|
|
|
|
ewah_set(si->replace_bitmap, i);
|
2014-06-13 12:19:43 +00:00
|
|
|
ce->ce_flags |= CE_STRIP_NAME;
|
2014-06-13 12:19:40 +00:00
|
|
|
ALLOC_GROW(entries, nr_entries+1, nr_alloc);
|
|
|
|
entries[nr_entries++] = ce;
|
|
|
|
}
|
2018-01-07 22:30:14 +00:00
|
|
|
if (is_null_oid(&ce->oid))
|
|
|
|
istate->drop_cache_tree = 1;
|
2014-06-13 12:19:40 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < istate->cache_nr; i++) {
|
|
|
|
ce = istate->cache[i];
|
|
|
|
if ((!si->base || !ce->index) && !(ce->ce_flags & CE_REMOVE)) {
|
2014-06-13 12:19:43 +00:00
|
|
|
assert(!(ce->ce_flags & CE_STRIP_NAME));
|
2014-06-13 12:19:40 +00:00
|
|
|
ALLOC_GROW(entries, nr_entries+1, nr_alloc);
|
|
|
|
entries[nr_entries++] = ce;
|
|
|
|
}
|
|
|
|
ce->ce_flags &= ~CE_MATCHED;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* take cache[] out temporarily, put entries[] in its place
|
|
|
|
* for writing
|
|
|
|
*/
|
|
|
|
si->saved_cache = istate->cache;
|
2014-06-13 12:19:36 +00:00
|
|
|
si->saved_cache_nr = istate->cache_nr;
|
2014-06-13 12:19:40 +00:00
|
|
|
istate->cache = entries;
|
|
|
|
istate->cache_nr = nr_entries;
|
2014-06-13 12:19:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void finish_writing_split_index(struct index_state *istate)
|
|
|
|
{
|
|
|
|
struct split_index *si = init_split_index(istate);
|
2014-06-13 12:19:40 +00:00
|
|
|
|
|
|
|
ewah_free(si->delete_bitmap);
|
|
|
|
ewah_free(si->replace_bitmap);
|
|
|
|
si->delete_bitmap = NULL;
|
|
|
|
si->replace_bitmap = NULL;
|
|
|
|
free(istate->cache);
|
|
|
|
istate->cache = si->saved_cache;
|
2014-06-13 12:19:36 +00:00
|
|
|
istate->cache_nr = si->saved_cache_nr;
|
|
|
|
}
|
|
|
|
|
|
|
|
void discard_split_index(struct index_state *istate)
|
|
|
|
{
|
|
|
|
struct split_index *si = istate->split_index;
|
|
|
|
if (!si)
|
|
|
|
return;
|
|
|
|
istate->split_index = NULL;
|
|
|
|
si->refcount--;
|
|
|
|
if (si->refcount)
|
|
|
|
return;
|
|
|
|
if (si->base) {
|
|
|
|
discard_index(si->base);
|
|
|
|
free(si->base);
|
|
|
|
}
|
|
|
|
free(si);
|
|
|
|
}
|
2014-06-13 12:19:38 +00:00
|
|
|
|
|
|
|
void save_or_free_index_entry(struct index_state *istate, struct cache_entry *ce)
|
|
|
|
{
|
|
|
|
if (ce->index &&
|
|
|
|
istate->split_index &&
|
|
|
|
istate->split_index->base &&
|
|
|
|
ce->index <= istate->split_index->base->cache_nr &&
|
|
|
|
ce == istate->split_index->base->cache[ce->index - 1])
|
|
|
|
ce->ce_flags |= CE_REMOVE;
|
|
|
|
else
|
2018-07-02 19:49:31 +00:00
|
|
|
discard_cache_entry(ce);
|
2014-06-13 12:19:38 +00:00
|
|
|
}
|
2014-06-13 12:19:39 +00:00
|
|
|
|
|
|
|
void replace_index_entry_in_base(struct index_state *istate,
|
2018-02-14 18:59:48 +00:00
|
|
|
struct cache_entry *old_entry,
|
|
|
|
struct cache_entry *new_entry)
|
2014-06-13 12:19:39 +00:00
|
|
|
{
|
2018-02-14 18:59:48 +00:00
|
|
|
if (old_entry->index &&
|
2014-06-13 12:19:39 +00:00
|
|
|
istate->split_index &&
|
|
|
|
istate->split_index->base &&
|
2018-02-14 18:59:48 +00:00
|
|
|
old_entry->index <= istate->split_index->base->cache_nr) {
|
|
|
|
new_entry->index = old_entry->index;
|
|
|
|
if (old_entry != istate->split_index->base->cache[new_entry->index - 1])
|
2018-07-02 19:49:31 +00:00
|
|
|
discard_cache_entry(istate->split_index->base->cache[new_entry->index - 1]);
|
2018-02-14 18:59:48 +00:00
|
|
|
istate->split_index->base->cache[new_entry->index - 1] = new_entry;
|
2014-06-13 12:19:39 +00:00
|
|
|
}
|
|
|
|
}
|
2017-02-27 18:00:01 +00:00
|
|
|
|
|
|
|
void add_split_index(struct index_state *istate)
|
|
|
|
{
|
|
|
|
if (!istate->split_index) {
|
|
|
|
init_split_index(istate);
|
|
|
|
istate->cache_changed |= SPLIT_INDEX_ORDERED;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void remove_split_index(struct index_state *istate)
|
|
|
|
{
|
2017-06-24 19:02:39 +00:00
|
|
|
if (istate->split_index) {
|
2019-02-13 09:51:29 +00:00
|
|
|
if (istate->split_index->base) {
|
|
|
|
/*
|
|
|
|
* When removing the split index, we need to move
|
|
|
|
* ownership of the mem_pool associated with the
|
|
|
|
* base index to the main index. There may be cache entries
|
|
|
|
* allocated from the base's memory pool that are shared with
|
|
|
|
* the_index.cache[].
|
|
|
|
*/
|
|
|
|
mem_pool_combine(istate->ce_mem_pool,
|
|
|
|
istate->split_index->base->ce_mem_pool);
|
block alloc: allocate cache entries from mem_pool
When reading large indexes from disk, a portion of the time is
dominated by malloc() calls. This can be mitigated by allocating a
large block of memory and managing it ourselves via memory pools.
This change moves the cache entry allocation to be on top of memory
pools.
Design:
The index_state struct will gain a notion of an associated memory_pool
from which cache_entries will be allocated. When reading in the
index from disk, we have information on the number of entries and
their size, which can guide us in deciding how large our initial
memory allocation should be. When an index is discarded, the
associated memory_pool will be discarded as well - so the lifetime of
a cache_entry is tied to the lifetime of the index_state that it was
allocated for.
In the case of a Split Index, the following rules are followed. First,
some terminology is defined:
Terminology:
- 'the_index': represents the logical view of the index
- 'split_index': represents the "base" cache entries. Read from the
split index file.
'the_index' can reference a single split_index, as well as
cache_entries from the split_index. `the_index` will be discarded
before the `split_index` is. This means that when we are allocating
cache_entries in the presence of a split index, we need to allocate
the entries from the `split_index`'s memory pool. This allows us to
follow the pattern that `the_index` can reference cache_entries from
the `split_index`, and that the cache_entries will not be freed while
they are still being referenced.
Managing transient cache_entry structs:
Cache entries are usually allocated for an index, but this is not always
the case. Cache entries are sometimes allocated because this is the
type that the existing checkout_entry function works with. Because of
this, the existing code needs to handle cache entries associated with an
index / memory pool, and those that only exist transiently. Several
strategies were contemplated around how to handle this:
Chosen approach:
An extra field was added to the cache_entry type to track whether the
cache_entry was allocated from a memory pool or not. This is currently
an int field, as there are no more available bits in the existing
ce_flags bit field. If / when more bits are needed, this new field can
be turned into a proper bit field.
Alternatives:
1) Do not include any information about how the cache_entry was
allocated. Calling code would be responsible for tracking whether the
cache_entry needed to be freed or not.
Pro: No extra memory overhead to track this state
Con: Extra complexity in callers to handle this correctly.
The extra complexity and burden to not regress this behavior in the
future was more than we wanted.
2) cache_entry would gain knowledge about which mem_pool allocated it
Pro: Could (potentially) do extra logic to know when a mem_pool no
longer had references to any cache_entry
Con: cache_entry would grow heavier by a pointer, instead of int
We didn't see a tangible benefit to this approach
3) Do not add any extra information to a cache_entry, but when freeing a
cache entry, check if the memory exists in a region managed by existing
mem_pools.
Pro: No extra memory overhead to track state
Con: Extra computation is performed when freeing cache entries
We decided tracking and iterating over known memory pool regions was
less desirable than adding an extra field to track this state.
Signed-off-by: Jameson Miller <jamill@microsoft.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-07-02 19:49:37 +00:00
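A toy bump allocator illustrating the memory-pool idea laid out above.
Git's real implementation lives in mem-pool.c and is richer (multiple
blocks, alignment, mem_pool_combine(), ...), so the names below are
illustrative only. The key property is the one the design relies on:
entries allocated from the pool live exactly as long as the pool, and
all of them are released by a single discard.

#include <stdio.h>
#include <stdlib.h>

struct toy_pool {
	char *block;		/* one large upfront allocation */
	size_t used;		/* bump pointer */
	size_t capacity;
};

static void toy_pool_init(struct toy_pool *p, size_t capacity)
{
	p->block = malloc(capacity);	/* error handling omitted in this sketch */
	p->used = 0;
	p->capacity = capacity;
}

static void *toy_pool_alloc(struct toy_pool *p, size_t size)
{
	void *ptr;
	if (p->used + size > p->capacity)
		return NULL;	/* a real pool would grow by adding another block */
	ptr = p->block + p->used;
	p->used += size;
	return ptr;
}

static void toy_pool_discard(struct toy_pool *p)
{
	free(p->block);		/* frees every entry at once: entry lifetime == pool lifetime */
	p->block = NULL;
	p->used = p->capacity = 0;
}

int main(void)
{
	struct toy_pool pool;
	char *entry;

	toy_pool_init(&pool, 1024);
	entry = toy_pool_alloc(&pool, 64);	/* stands in for one cache_entry */
	if (entry) {
		snprintf(entry, 64, "entry allocated from the pool");
		puts(entry);
	}
	toy_pool_discard(&pool);
	return 0;
}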
|
|
|
|
2019-02-13 09:51:29 +00:00
|
|
|
/*
|
|
|
|
* The split index no longer owns the mem_pool backing
|
|
|
|
* its cache array. As we are discarding this index,
|
|
|
|
* mark the index as having no cache entries, so it
|
|
|
|
* will not attempt to clean up the cache entries or
|
|
|
|
* validate them.
|
|
|
|
*/
|
2018-07-02 19:49:37 +00:00
|
|
|
istate->split_index->base->cache_nr = 0;
|
2019-02-13 09:51:29 +00:00
|
|
|
}
|
2018-07-02 19:49:37 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We can discard the split index because its
|
|
|
|
* memory pool has been incorporated into the
|
|
|
|
* memory pool associated with the_index.
|
|
|
|
*/
|
|
|
|
discard_split_index(istate);
|
|
|
|
|
2017-06-24 19:02:39 +00:00
|
|
|
istate->cache_changed |= SOMETHING_CHANGED;
|
|
|
|
}
|
2017-02-27 18:00:01 +00:00
|
|
|
}
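As the comment inside remove_split_index() above explains,
mem_pool_combine() transfers ownership of the base index's pool to the
main index so that cache entries shared between the two remain valid.
A stand-alone sketch of that kind of splice, under the simplifying
assumption that a pool is just a linked list of large blocks (Git's
mem-pool.c is more involved), might look like this; the names are
made up for illustration:

struct toy_block {
	struct toy_block *next;
	/* the allocated payload would follow here */
};

struct toy_mem_pool {
	struct toy_block *blocks;	/* singly linked list of large allocations */
};

/*
 * Splice every block of 'src' onto 'dst'.  Pointers handed out from
 * 'src' stay valid; they are now freed only when 'dst' is discarded.
 */
void toy_pool_combine(struct toy_mem_pool *dst, struct toy_mem_pool *src)
{
	struct toy_block **tail = &dst->blocks;

	while (*tail)
		tail = &(*tail)->next;	/* find the end of dst's block list */
	*tail = src->blocks;		/* hand ownership of src's blocks to dst */
	src->blocks = NULL;		/* src no longer owns any memory */
}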
|