mmapped_ref_iterator: inline into packed_ref_iterator

Since `packed_ref_iterator` is now delegating to
`mmapped_ref_iterator` rather than `cache_ref_iterator` to do the
heavy lifting, there is no need to keep the two iterators separate. So
"inline" `mmapped_ref_iterator` into `packed_ref_iterator`. This
removes a bunch of boilerplate.

Signed-off-by: Michael Haggerty <mhagger@alum.mit.edu>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
This commit is contained in:
Michael Haggerty 2017-09-25 10:00:17 +02:00 committed by Junio C Hamano
parent a6e19bcdad
commit 523ee2d785

View file

@ -225,157 +225,6 @@ static NORETURN void die_invalid_line(const char *path,
}
/*
* This value is set in `base.flags` if the peeled value of the
* current reference is known. In that case, `peeled` contains the
* correct peeled value for the reference, which might be `null_sha1`
* if the reference is not a tag or if it is broken.
*/
#define REF_KNOWS_PEELED 0x40
/*
* An iterator over a packed-refs file that is currently mmapped.
*/
struct mmapped_ref_iterator {
	struct ref_iterator base;

	/*
	 * The cache whose mmapped buffer we are iterating over. We
	 * hold a reference to it (acquired in
	 * mmapped_ref_iterator_begin(), released in
	 * mmapped_ref_iterator_abort()).
	 */
	struct packed_ref_cache *packed_refs;

	/* The current position in the mmapped file: */
	const char *pos;

	/* The end of the mmapped file: */
	const char *eof;

	/* Value and peeled value of the record most recently parsed: */
	struct object_id oid, peeled;

	/*
	 * Scratch space holding the current record's refname;
	 * `base.refname` points into this buffer.
	 */
	struct strbuf refname_buf;
};
/*
 * Parse the next record out of the mmapped `packed-refs` buffer into
 * `iter->oid`, `iter->base.refname`, `iter->peeled`, and
 * `iter->base.flags`. Return ITER_OK on success, or abort the
 * iteration (returning ITER_DONE) at end of buffer. die() if the file
 * contains a malformed or unterminated line.
 */
static int mmapped_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ref_iterator_abort(ref_iterator);

	iter->base.flags = REF_ISPACKED;

	/* A record is at least "<hex-oid> SP <refname> LF": */
	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->packed_refs->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->packed_refs->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	/*
	 * Tolerate refnames that are merely unusual, but die on ones
	 * that could be used maliciously (e.g. to escape the refs
	 * hierarchy):
	 */
	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	/* The file header may vouch for peeled values of some refs: */
	if (iter->packed_refs->peeled == PEELED_FULLY ||
	    (iter->packed_refs->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	/* An optional "^<hex-oid> LF" line carries the peeled value: */
	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->packed_refs->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}
/*
 * Report the peeled value of the current reference: use the value
 * already parsed from the file if it is known; otherwise fall back to
 * peeling the object itself. Return 0 on success, -1 if the reference
 * cannot be peeled.
 */
static int mmapped_ref_iterator_peel(struct ref_iterator *ref_iterator,
				     struct object_id *peeled)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;

	if (iter->base.flags & REF_KNOWS_PEELED) {
		/* The packed-refs file recorded the peeled value. */
		oidcpy(peeled, &iter->peeled);
		return is_null_oid(&iter->peeled) ? -1 : 0;
	}

	if (iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))
		return -1;

	return !!peel_object(iter->oid.hash, peeled->hash);
}
/*
 * End the iteration, freeing the iterator's resources (including the
 * reference it holds on the packed-ref cache). Always returns
 * ITER_DONE.
 */
static int mmapped_ref_iterator_abort(struct ref_iterator *ref_iterator)
{
	struct mmapped_ref_iterator *iter =
		(struct mmapped_ref_iterator *)ref_iterator;

	strbuf_release(&iter->refname_buf);
	release_packed_ref_cache(iter->packed_refs);
	base_ref_iterator_free(ref_iterator);
	return ITER_DONE;
}
/* Virtual function table wiring mmapped_ref_iterator into the ref_iterator API: */
static struct ref_iterator_vtable mmapped_ref_iterator_vtable = {
	mmapped_ref_iterator_advance,
	mmapped_ref_iterator_peel,
	mmapped_ref_iterator_abort
};
/*
 * Return an iterator over the packed-refs records between `pos` and
 * `eof` in `packed_refs`'s mmapped buffer, or an empty iterator if
 * there is no buffer. The iterator acquires its own reference to
 * `packed_refs`, released when the iteration is aborted or exhausted.
 */
struct ref_iterator *mmapped_ref_iterator_begin(
		struct packed_ref_cache *packed_refs,
		const char *pos, const char *eof)
{
	struct mmapped_ref_iterator *iter;
	struct ref_iterator *ref_iterator;

	/*
	 * Check for an empty file before allocating: the previous code
	 * xcalloc()'d the iterator first and leaked it on this path.
	 */
	if (!packed_refs->buf)
		return empty_ref_iterator_begin();

	iter = xcalloc(1, sizeof(*iter));
	ref_iterator = &iter->base;

	base_ref_iterator_init(ref_iterator, &mmapped_ref_iterator_vtable, 1);

	iter->packed_refs = packed_refs;
	acquire_packed_ref_cache(iter->packed_refs);
	iter->pos = pos;
	iter->eof = eof;
	strbuf_init(&iter->refname_buf, 0);

	iter->base.oid = &iter->oid;

	return ref_iterator;
}
struct packed_ref_entry {
const char *start;
size_t len;
@ -858,38 +707,120 @@ static int packed_read_raw_ref(struct ref_store *ref_store,
return 0;
}
/*
* This value is set in `base.flags` if the peeled value of the
* current reference is known. In that case, `peeled` contains the
* correct peeled value for the reference, which might be `null_sha1`
* if the reference is not a tag or if it is broken.
*/
#define REF_KNOWS_PEELED 0x40
/*
* An iterator over a packed-refs file that is currently mmapped.
*/
struct packed_ref_iterator {
	struct ref_iterator base;

	/*
	 * NOTE(review): this diff view has lost its +/- markers, so the
	 * members removed by the commit (`cache`, `iter0`) appear
	 * interleaved with the members that replace them; only one set
	 * exists in any real version of this struct. TODO: consult the
	 * actual pre-/post-image before relying on this layout.
	 */
	struct packed_ref_cache *cache;      /* removed: replaced by packed_refs */
	struct ref_iterator *iter0;          /* removed: delegation no longer used */
	struct packed_ref_cache *packed_refs;

	/* The current position in the mmapped file: */
	const char *pos;

	/* The end of the mmapped file: */
	const char *eof;

	/* Value and peeled value of the record most recently parsed: */
	struct object_id oid, peeled;

	/* Scratch space holding the current refname: */
	struct strbuf refname_buf;

	/* DO_FOR_EACH_* flags controlling which refs are reported: */
	unsigned int flags;
};
/*
 * Parse the next record out of the mmapped `packed-refs` buffer into
 * `iter->oid`, `iter->base.refname`, `iter->peeled`, and
 * `iter->base.flags`. Return ITER_OK on success or ITER_DONE at end
 * of buffer. die() if the file contains a malformed or unterminated
 * line.
 */
static int next_record(struct packed_ref_iterator *iter)
{
	const char *p = iter->pos, *eol;

	strbuf_reset(&iter->refname_buf);

	if (iter->pos == iter->eof)
		return ITER_DONE;

	iter->base.flags = REF_ISPACKED;

	/* A record is at least "<hex-oid> SP <refname> LF": */
	if (iter->eof - p < GIT_SHA1_HEXSZ + 2 ||
	    parse_oid_hex(p, &iter->oid, &p) ||
	    !isspace(*p++))
		die_invalid_line(iter->packed_refs->refs->path,
				 iter->pos, iter->eof - iter->pos);

	eol = memchr(p, '\n', iter->eof - p);
	if (!eol)
		die_unterminated_line(iter->packed_refs->refs->path,
				      iter->pos, iter->eof - iter->pos);

	strbuf_add(&iter->refname_buf, p, eol - p);
	iter->base.refname = iter->refname_buf.buf;

	/*
	 * Tolerate refnames that are merely unusual, but die on ones
	 * that could be used maliciously:
	 */
	if (check_refname_format(iter->base.refname, REFNAME_ALLOW_ONELEVEL)) {
		if (!refname_is_safe(iter->base.refname))
			die("packed refname is dangerous: %s",
			    iter->base.refname);
		oidclr(&iter->oid);
		iter->base.flags |= REF_BAD_NAME | REF_ISBROKEN;
	}
	/* The file header may vouch for peeled values of some refs: */
	if (iter->packed_refs->peeled == PEELED_FULLY ||
	    (iter->packed_refs->peeled == PEELED_TAGS &&
	     starts_with(iter->base.refname, "refs/tags/")))
		iter->base.flags |= REF_KNOWS_PEELED;

	iter->pos = eol + 1;

	/* An optional "^<hex-oid> LF" line carries the peeled value: */
	if (iter->pos < iter->eof && *iter->pos == '^') {
		p = iter->pos + 1;
		if (iter->eof - p < GIT_SHA1_HEXSZ + 1 ||
		    parse_oid_hex(p, &iter->peeled, &p) ||
		    *p++ != '\n')
			die_invalid_line(iter->packed_refs->refs->path,
					 iter->pos, iter->eof - iter->pos);
		iter->pos = p;

		/*
		 * Regardless of what the file header said, we
		 * definitely know the value of *this* reference. But
		 * we suppress it if the reference is broken:
		 */
		if ((iter->base.flags & REF_ISBROKEN)) {
			oidclr(&iter->peeled);
			iter->base.flags &= ~REF_KNOWS_PEELED;
		} else {
			iter->base.flags |= REF_KNOWS_PEELED;
		}
	} else {
		oidclr(&iter->peeled);
	}

	return ITER_OK;
}
static int packed_ref_iterator_advance(struct ref_iterator *ref_iterator)
{
struct packed_ref_iterator *iter =
(struct packed_ref_iterator *)ref_iterator;
int ok;
while ((ok = ref_iterator_advance(iter->iter0)) == ITER_OK) {
while ((ok = next_record(iter)) == ITER_OK) {
if (iter->flags & DO_FOR_EACH_PER_WORKTREE_ONLY &&
ref_type(iter->iter0->refname) != REF_TYPE_PER_WORKTREE)
ref_type(iter->base.refname) != REF_TYPE_PER_WORKTREE)
continue;
if (!(iter->flags & DO_FOR_EACH_INCLUDE_BROKEN) &&
!ref_resolves_to_object(iter->iter0->refname,
iter->iter0->oid,
iter->iter0->flags))
!ref_resolves_to_object(iter->base.refname, &iter->oid,
iter->flags))
continue;
iter->base.refname = iter->iter0->refname;
iter->base.oid = iter->iter0->oid;
iter->base.flags = iter->iter0->flags;
return ITER_OK;
}
iter->iter0 = NULL;
if (ref_iterator_abort(ref_iterator) != ITER_DONE)
ok = ITER_ERROR;
@ -902,7 +833,14 @@ static int packed_ref_iterator_peel(struct ref_iterator *ref_iterator,
struct packed_ref_iterator *iter =
(struct packed_ref_iterator *)ref_iterator;
return ref_iterator_peel(iter->iter0, peeled);
if ((iter->base.flags & REF_KNOWS_PEELED)) {
oidcpy(peeled, &iter->peeled);
return is_null_oid(&iter->peeled) ? -1 : 0;
} else if ((iter->base.flags & (REF_ISBROKEN | REF_ISSYMREF))) {
return -1;
} else {
return !!peel_object(iter->oid.hash, peeled->hash);
}
}
static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
@ -911,10 +849,8 @@ static int packed_ref_iterator_abort(struct ref_iterator *ref_iterator)
(struct packed_ref_iterator *)ref_iterator;
int ok = ITER_DONE;
if (iter->iter0)
ok = ref_iterator_abort(iter->iter0);
release_packed_ref_cache(iter->cache);
strbuf_release(&iter->refname_buf);
release_packed_ref_cache(iter->packed_refs);
base_ref_iterator_free(ref_iterator);
return ok;
}
@ -940,6 +876,11 @@ static struct ref_iterator *packed_ref_iterator_begin(
required_flags |= REF_STORE_ODB;
refs = packed_downcast(ref_store, required_flags, "ref_iterator_begin");
packed_refs = get_packed_ref_cache(refs);
if (!packed_refs->buf)
return empty_ref_iterator_begin();
iter = xcalloc(1, sizeof(*iter));
ref_iterator = &iter->base;
base_ref_iterator_init(ref_iterator, &packed_ref_iterator_vtable, 1);
@ -949,7 +890,7 @@ static struct ref_iterator *packed_ref_iterator_begin(
* the packed-ref cache is up to date with what is on disk,
* and re-reads it if not.
*/
iter->cache = packed_refs = get_packed_ref_cache(refs);
iter->packed_refs = packed_refs;
acquire_packed_ref_cache(packed_refs);
if (prefix && *prefix)
@ -957,8 +898,11 @@ static struct ref_iterator *packed_ref_iterator_begin(
else
start = packed_refs->buf + packed_refs->header_len;
iter->iter0 = mmapped_ref_iterator_begin(packed_refs,
start, packed_refs->eof);
iter->pos = start;
iter->eof = packed_refs->eof;
strbuf_init(&iter->refname_buf, 0);
iter->base.oid = &iter->oid;
iter->flags = flags;