mirror of https://github.com/git/git
synced 2024-11-05 18:59:29 +00:00
b739d971e5
While updating the microsoft/git fork on top of v2.26.0-rc0 and consuming that build into Scalar, I noticed a corner case bug around partial clone. The "scalar clone" command can create a Git repository with the proper config for using partial clone with the "blob:none" filter. Instead of calling "git clone", it runs "git init" then sets a few more config values before running "git fetch".

In our builds on v2.26.0-rc0, we noticed that our "git fetch" command was failing with

    error: https://github.com/microsoft/scalar did not send all necessary objects

This does not happen if you copy the config file from a repository created by "git clone --filter=blob:none <url>", but it does happen when adding the config option "core.logAllRefUpdates = true".

By debugging, I was able to see that the loop inside check_connected() that checks if all refs are contained in promisor packs actually did not have any packfiles in the packed_git list.

I'm not sure what corner-case issues caused this config option to prevent the reprepare_packed_git() from being called at the proper spot during the fetch operation. This approach requires a situation where we use the remote helper process, which makes it difficult to test.

It is possible to place a reprepare_packed_git() call in the fetch code closer to where we receive a pack, but that leaves an opening for a later change to re-introduce this problem. Further, a concurrent repack operation could replace the pack-file list we already loaded into memory, causing this issue in an even harder to reproduce scenario.

It is really the responsibility of anyone looping through the list of pack-files for a certain object to fall back to reprepare_packed_git() on a fail-to-find. The loop in check_connected() does not have this fallback, leading to this bug.

We _could_ try looping through the packs and only calling reprepare_packed_git() after a miss, but that change is more involved and has little value. Since this is isolated to the case when opt->check_refs_are_promisor_objects_only is true, we are confident that we are verifying the refs after downloading new data. This implies that calling reprepare_packed_git() in advance is not a huge cost compared to the rest of the operations already made.

Helped-by: Jeff King <peff@peff.net>
Helped-by: Junio Hamano <gitster@pobox.com>
Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
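For context, the rejected alternative mentioned above ("loop through the packs and only reprepare after a miss") might look roughly like the sketch below. This is not what the patch does and is only an illustration: the helper name find_ref_in_promisor_pack() is hypothetical, while get_all_packs(), find_pack_entry_one() and reprepare_packed_git() are the functions already used by connected.c as shown further down.

/*
 * Sketch only: retry the promisor-pack scan once after a miss,
 * instead of unconditionally calling reprepare_packed_git() up front.
 */
static int find_ref_in_promisor_pack(struct repository *r,
				     const struct object_id *oid)
{
	struct packed_git *p;
	int reprepared = 0;

retry:
	for (p = get_all_packs(r); p; p = p->next) {
		if (!p->pack_promisor)
			continue;
		if (find_pack_entry_one(oid->hash, p))
			return 1;	/* found in a promisor pack */
	}
	if (!reprepared) {
		/* fall back to reprepare_packed_git() on a fail-to-find */
		reprepare_packed_git(r);
		reprepared = 1;
		goto retry;
	}
	return 0;	/* still missing after repreparing */
}

As the message explains, the patch instead calls reprepare_packed_git() once, up front, inside the opt->check_refs_are_promisor_objects_only branch.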
145 lines
4.2 KiB
C
#include "cache.h"
|
|
#include "object-store.h"
|
|
#include "run-command.h"
|
|
#include "sigchain.h"
|
|
#include "connected.h"
|
|
#include "transport.h"
|
|
#include "packfile.h"
|
|
#include "promisor-remote.h"
|
|
|
|
/*
 * If we feed all the commits we want to verify to this command
 *
 * $ git rev-list --objects --stdin --not --all
 *
 * and if it does not error out, that means everything reachable from
 * these commits locally exists and is connected to our existing refs.
 * Note that this does _not_ validate the individual objects.
 *
 * Returns 0 if everything is connected, non-zero otherwise.
 */
int check_connected(oid_iterate_fn fn, void *cb_data,
		    struct check_connected_options *opt)
{
	struct child_process rev_list = CHILD_PROCESS_INIT;
	struct check_connected_options defaults = CHECK_CONNECTED_INIT;
	char commit[GIT_MAX_HEXSZ + 1];
	struct object_id oid;
	int err = 0;
	struct packed_git *new_pack = NULL;
	struct transport *transport;
	size_t base_len;
	const unsigned hexsz = the_hash_algo->hexsz;

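	/* Fall back to the default options when the caller passes NULL. */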
	if (!opt)
		opt = &defaults;
	transport = opt->transport;

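	/*
	 * If the callback yields no object at all, there is nothing to
	 * check; close the caller's error descriptor and report success.
	 */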
if (fn(cb_data, &oid)) {
|
|
if (opt->err_fd)
|
|
close(opt->err_fd);
|
|
return err;
|
|
}
|
|
|
|
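	/*
	 * If the transport left a ".keep" lockfile for the pack it just
	 * received, open the matching ".idx" so the loop further down can
	 * cheaply skip refs that point into that self-contained pack.
	 */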
	if (transport && transport->smart_options &&
	    transport->smart_options->self_contained_and_connected &&
	    transport->pack_lockfile &&
	    strip_suffix(transport->pack_lockfile, ".keep", &base_len)) {
		struct strbuf idx_file = STRBUF_INIT;
		strbuf_add(&idx_file, transport->pack_lockfile, base_len);
		strbuf_addstr(&idx_file, ".idx");
		new_pack = add_packed_git(idx_file.buf, idx_file.len, 1);
		strbuf_release(&idx_file);
	}

	if (opt->check_refs_are_promisor_objects_only) {
		/*
		 * For partial clones, we don't want to have to do a regular
		 * connectivity check because we have to enumerate and exclude
		 * all promisor objects (slow), and then the connectivity check
		 * itself becomes a no-op because in a partial clone every
		 * object is a promisor object. Instead, just make sure we
		 * received, in a promisor packfile, the objects pointed to by
		 * each wanted ref.
		 *
		 * Before checking for promisor packs, be sure we have the
		 * latest pack-files loaded into memory.
		 */
		reprepare_packed_git(the_repository);
		do {
			struct packed_git *p;

			for (p = get_all_packs(the_repository); p; p = p->next) {
				if (!p->pack_promisor)
					continue;
				if (find_pack_entry_one(oid.hash, p))
					goto promisor_pack_found;
			}
			return 1;
promisor_pack_found:
			;
		} while (!fn(cb_data, &oid));
		return 0;
	}

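	/*
	 * Otherwise run the full connectivity check: feed each ref's object
	 * name to the "git rev-list --objects --stdin --not --all" command
	 * described in the comment at the top of this file.
	 */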
	if (opt->shallow_file) {
		argv_array_push(&rev_list.args, "--shallow-file");
		argv_array_push(&rev_list.args, opt->shallow_file);
	}
	argv_array_push(&rev_list.args, "rev-list");
	argv_array_push(&rev_list.args, "--objects");
	argv_array_push(&rev_list.args, "--stdin");
	if (has_promisor_remote())
		argv_array_push(&rev_list.args, "--exclude-promisor-objects");
	if (!opt->is_deepening_fetch) {
		argv_array_push(&rev_list.args, "--not");
		argv_array_push(&rev_list.args, "--all");
	}
	argv_array_push(&rev_list.args, "--quiet");
	argv_array_push(&rev_list.args, "--alternate-refs");
	if (opt->progress)
		argv_array_pushf(&rev_list.args, "--progress=%s",
				 _("Checking connectivity"));

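	/*
	 * Only the exit code of rev-list matters here: pipe object names to
	 * its stdin, discard its stdout, and route stderr as the caller
	 * requested.
	 */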
	rev_list.git_cmd = 1;
	rev_list.env = opt->env;
	rev_list.in = -1;
	rev_list.no_stdout = 1;
	if (opt->err_fd)
		rev_list.err = opt->err_fd;
	else
		rev_list.no_stderr = opt->quiet;

if (start_command(&rev_list))
|
|
return error(_("Could not run 'git rev-list'"));
|
|
|
|
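	/*
	 * rev-list may exit before reading everything we send; ignore
	 * SIGPIPE so a failed write is reported via errno (EPIPE) below
	 * rather than killing this process.
	 */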
	sigchain_push(SIGPIPE, SIG_IGN);

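	/* Reuse one buffer per object: hex OID followed by a newline. */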
	commit[hexsz] = '\n';
	do {
		/*
		 * If index-pack already checked that:
		 * - there are no dangling pointers in the new pack
		 * - the pack is self contained
		 * Then if the updated ref is in the new pack, then we
		 * are sure the ref is good and not sending it to
		 * rev-list for verification.
		 */
		if (new_pack && find_pack_entry_one(oid.hash, new_pack))
			continue;

		memcpy(commit, oid_to_hex(&oid), hexsz);
		if (write_in_full(rev_list.in, commit, hexsz + 1) < 0) {
			if (errno != EPIPE && errno != EINVAL)
				error_errno(_("failed write to rev-list"));
			err = -1;
			break;
		}
	} while (!fn(cb_data, &oid));

	if (close(rev_list.in))
		err = error_errno(_("failed to close rev-list's stdin"));

	sigchain_pop(SIGPIPE);
	return finish_command(&rev_list) || err;
}