#include "git-compat-util.h"
#include "dir.h"
#include "hash.h"
#include "read-cache.h"
#include "resolve-undo.h"
#include "sparse-index.h"
#include "string-list.h"

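/*
 * Record the mode and object name of a higher-stage (conflicted) cache
 * entry under its path, so that the unmerged state can be recreated
 * later by unmerge_index_entry_at().  Stage 0 entries are ignored.
 */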
/* The only error case is to run out of memory in string-list */
void record_resolve_undo(struct index_state *istate, struct cache_entry *ce)
{
	struct string_list_item *lost;
	struct resolve_undo_info *ui;
	struct string_list *resolve_undo;
	int stage = ce_stage(ce);

	if (!stage)
		return;

	if (!istate->resolve_undo) {
		CALLOC_ARRAY(resolve_undo, 1);
		resolve_undo->strdup_strings = 1;
		istate->resolve_undo = resolve_undo;
	}
	resolve_undo = istate->resolve_undo;
	lost = string_list_insert(resolve_undo, ce->name);
	if (!lost->util)
		lost->util = xcalloc(1, sizeof(*ui));
	ui = lost->util;
	oidcpy(&ui->oid[stage - 1], &ce->oid);
	ui->mode[stage - 1] = ce->ce_mode;
}

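/*
 * Append the recorded resolve-undo information to "sb" in its
 * serialized form: for each path, the NUL-terminated path name, three
 * NUL-terminated octal mode strings (one per stage, "0" if the stage
 * is absent), followed by the raw object name of each stage whose mode
 * is non-zero.
 */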
void resolve_undo_write(struct strbuf *sb, struct string_list *resolve_undo)
{
	struct string_list_item *item;
	for_each_string_list_item(item, resolve_undo) {
		struct resolve_undo_info *ui = item->util;
		int i;

		if (!ui)
			continue;
		strbuf_addstr(sb, item->string);
		strbuf_addch(sb, 0);
		for (i = 0; i < 3; i++)
			strbuf_addf(sb, "%o%c", ui->mode[i], 0);
		for (i = 0; i < 3; i++) {
			if (!ui->mode[i])
				continue;
			strbuf_add(sb, ui->oid[i].hash, the_hash_algo->rawsz);
		}
	}
}

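/*
 * Parse "size" bytes of resolve-undo data in the format produced by
 * resolve_undo_write() into a freshly allocated string list keyed by
 * path.  Returns NULL, after reporting an error, on malformed input.
 */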
struct string_list *resolve_undo_read(const char *data, unsigned long size)
{
	struct string_list *resolve_undo;
	size_t len;
	char *endptr;
	int i;
	const unsigned rawsz = the_hash_algo->rawsz;

	CALLOC_ARRAY(resolve_undo, 1);
	resolve_undo->strdup_strings = 1;

	while (size) {
		struct string_list_item *lost;
		struct resolve_undo_info *ui;

		len = strlen(data) + 1;
		if (size <= len)
			goto error;
		lost = string_list_insert(resolve_undo, data);
		if (!lost->util)
			lost->util = xcalloc(1, sizeof(*ui));
		ui = lost->util;
		size -= len;
		data += len;

		for (i = 0; i < 3; i++) {
			ui->mode[i] = strtoul(data, &endptr, 8);
			if (!endptr || endptr == data || *endptr)
				goto error;
			len = (endptr + 1) - (char*)data;
			if (size <= len)
				goto error;
			size -= len;
			data += len;
		}

		for (i = 0; i < 3; i++) {
			if (!ui->mode[i])
				continue;
			if (size < rawsz)
				goto error;
			oidread(&ui->oid[i], (const unsigned char *)data);
			size -= rawsz;
			data += rawsz;
		}
	}
	return resolve_undo;

error:
	string_list_clear(resolve_undo, 1);
	error("Index records invalid resolve-undo information");
	return NULL;
}

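/*
 * Free any resolve-undo information attached to the index and mark the
 * in-core index as changed (RESOLVE_UNDO_CHANGED).
 */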
void resolve_undo_clear_index(struct index_state *istate)
{
	struct string_list *resolve_undo = istate->resolve_undo;
	if (!resolve_undo)
		return;
	string_list_clear(resolve_undo, 1);
	free(resolve_undo);
	istate->resolve_undo = NULL;
	istate->cache_changed |= RESOLVE_UNDO_CHANGED;
}

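/*
 * Recreate the conflicted (higher stage) entries recorded for the path
 * at "pos", replacing its stage 0 entry.  Returns the position of the
 * last entry belonging to that path, so the caller's loop can continue
 * from there; if nothing was recorded for the path, returns "pos"
 * unchanged.
 */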
int unmerge_index_entry_at(struct index_state *istate, int pos)
{
	const struct cache_entry *ce;
	struct string_list_item *item;
	struct resolve_undo_info *ru;
	int i, err = 0, matched;
	char *name;

	if (!istate->resolve_undo)
		return pos;

	ce = istate->cache[pos];
	if (ce_stage(ce)) {
		/* already unmerged */
		while ((pos < istate->cache_nr) &&
		       ! strcmp(istate->cache[pos]->name, ce->name))
			pos++;
		return pos - 1; /* return the last entry processed */
	}
	item = string_list_lookup(istate->resolve_undo, ce->name);
	if (!item)
		return pos;
	ru = item->util;
	if (!ru)
		return pos;
	matched = ce->ce_flags & CE_MATCHED;
	name = xstrdup(ce->name);
	remove_index_entry_at(istate, pos);
	for (i = 0; i < 3; i++) {
		struct cache_entry *nce;
		if (!ru->mode[i])
			continue;
		nce = make_cache_entry(istate,
				       ru->mode[i],
				       &ru->oid[i],
				       name, i + 1, 0);
		if (matched)
			nce->ce_flags |= CE_MATCHED;
		if (add_index_entry(istate, nce, ADD_CACHE_OK_TO_ADD)) {
			err = 1;
			error("cannot unmerge '%s'", name);
		}
	}
	free(name);
	if (err)
		return pos;
	free(ru);
	item->util = NULL;
	return unmerge_index_entry_at(istate, pos);
}

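/*
 * Recreate the recorded conflicted state for every index entry that
 * has been flagged with CE_MATCHED.
 */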
void unmerge_marked_index(struct index_state *istate)
{
	int i;

	if (!istate->resolve_undo)
		return;

	/* TODO: audit for interaction with sparse-index. */
	ensure_full_index(istate);
	for (i = 0; i < istate->cache_nr; i++) {
		const struct cache_entry *ce = istate->cache[i];
		if (ce->ce_flags & CE_MATCHED)
			i = unmerge_index_entry_at(istate, i);
	}
}

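/*
 * Recreate the recorded conflicted state for every index entry that
 * matches the given pathspec.
 */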
void unmerge_index(struct index_state *istate, const struct pathspec *pathspec)
{
	int i;

	if (!istate->resolve_undo)
		return;

	/* TODO: audit for interaction with sparse-index. */
	ensure_full_index(istate);
	for (i = 0; i < istate->cache_nr; i++) {
		const struct cache_entry *ce = istate->cache[i];
		if (!ce_path_match(istate, ce, pathspec, NULL))
			continue;
		i = unmerge_index_entry_at(istate, i);
	}
}