mirror of https://github.com/git/git
synced 2024-10-30 13:20:15 +00:00
5d44cd1c8b
If you try to fsck a repository that isn't entirely empty, but that has no inter-object references (i.e. all the objects are blobs, and don't refer to anything else), git-fsck-objects currently fails.

This probably cannot happen in practice, but can be tested with something like

	git init-db
	touch dummy
	git add dummy
	git fsck-objects

where the fsck will die by a divide-by-zero when it tries to look up the references from the one object it found (hash_obj() will do a modulus by refs_hash_size).

On some other architectures (ppc, sparc) the divide-by-zero will go unnoticed, and we'll instead SIGSEGV when we hit the "refs_hash[j]" access.

So move the test that should protect against this from mark_reachable() into lookup_object_refs(), which incidentally in the process also fixes mark_reachable() itself (it used to not mark the one object that _was_ reachable, because it decided that it had no refs too early).

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
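As a rough, standalone illustration of the failure mode described above (this is not part of the git sources; the names "table", "table_size" and "lookup" are made up for the sketch): taking "hash % table_size" with table_size == 0 divides by zero, which traps on x86 and may silently produce garbage on other architectures, leading to a bad table access instead. The fix is to bail out before hashing when the table was never allocated.

	#include <stdio.h>

	struct entry { int dummy; };

	static struct entry **table;		/* never allocated: size stays 0 */
	static unsigned int table_size;

	static struct entry *lookup(unsigned int hash)
	{
		if (!table_size)		/* the guard the commit moves into lookup_object_refs() */
			return NULL;
		return table[hash % table_size];
	}

	int main(void)
	{
		/* Without the guard above, this call would divide by zero. */
		printf("lookup -> %p\n", (void *)lookup(12345u));
		return 0;
	}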
144 lines
3 KiB
C
#include "cache.h"
|
|
#include "object.h"
|
|
|
|
int track_object_refs = 0;
|
|
|
|
static unsigned int refs_hash_size, nr_object_refs;
|
|
static struct object_refs **refs_hash;
|
|
|
|
static unsigned int hash_obj(struct object *obj, unsigned int n)
|
|
{
|
|
unsigned int hash = *(unsigned int *)obj->sha1;
|
|
return hash % n;
|
|
}
|
|
|
|
static void insert_ref_hash(struct object_refs *ref, struct object_refs **hash, unsigned int size)
|
|
{
|
|
int j = hash_obj(ref->base, size);
|
|
|
|
while (hash[j]) {
|
|
j++;
|
|
if (j >= size)
|
|
j = 0;
|
|
}
|
|
hash[j] = ref;
|
|
}
|
|
|
|
static void grow_refs_hash(void)
|
|
{
|
|
int i;
|
|
int new_hash_size = (refs_hash_size + 1000) * 3 / 2;
|
|
struct object_refs **new_hash;
|
|
|
|
new_hash = xcalloc(new_hash_size, sizeof(struct object_refs *));
|
|
for (i = 0; i < refs_hash_size; i++) {
|
|
struct object_refs *ref = refs_hash[i];
|
|
if (!ref)
|
|
continue;
|
|
insert_ref_hash(ref, new_hash, new_hash_size);
|
|
}
|
|
free(refs_hash);
|
|
refs_hash = new_hash;
|
|
refs_hash_size = new_hash_size;
|
|
}
|
|
|
|
static void add_object_refs(struct object *obj, struct object_refs *ref)
|
|
{
|
|
int nr = nr_object_refs + 1;
|
|
|
|
if (nr > refs_hash_size * 2 / 3)
|
|
grow_refs_hash();
|
|
ref->base = obj;
|
|
insert_ref_hash(ref, refs_hash, refs_hash_size);
|
|
nr_object_refs = nr;
|
|
}
|
|
|
|
struct object_refs *lookup_object_refs(struct object *obj)
|
|
{
|
|
struct object_refs *ref;
|
|
int j;
|
|
|
|
/* nothing to lookup */
|
|
if (!refs_hash_size)
|
|
return NULL;
|
|
j = hash_obj(obj, refs_hash_size);
|
|
while ((ref = refs_hash[j]) != NULL) {
|
|
if (ref->base == obj)
|
|
break;
|
|
j++;
|
|
if (j >= refs_hash_size)
|
|
j = 0;
|
|
}
|
|
return ref;
|
|
}
|
|
|
|
struct object_refs *alloc_object_refs(unsigned count)
|
|
{
|
|
struct object_refs *refs;
|
|
size_t size = sizeof(*refs) + count*sizeof(struct object *);
|
|
|
|
refs = xcalloc(1, size);
|
|
refs->count = count;
|
|
return refs;
|
|
}
|
|
|
|
static int compare_object_pointers(const void *a, const void *b)
|
|
{
|
|
const struct object * const *pa = a;
|
|
const struct object * const *pb = b;
|
|
if (*pa == *pb)
|
|
return 0;
|
|
else if (*pa < *pb)
|
|
return -1;
|
|
else
|
|
return 1;
|
|
}
|
|
|
|
void set_object_refs(struct object *obj, struct object_refs *refs)
|
|
{
|
|
unsigned int i, j;
|
|
|
|
/* Do not install empty list of references */
|
|
if (refs->count < 1) {
|
|
free(refs);
|
|
return;
|
|
}
|
|
|
|
/* Sort the list and filter out duplicates */
|
|
qsort(refs->ref, refs->count, sizeof(refs->ref[0]),
|
|
compare_object_pointers);
|
|
for (i = j = 1; i < refs->count; i++) {
|
|
if (refs->ref[i] != refs->ref[i - 1])
|
|
refs->ref[j++] = refs->ref[i];
|
|
}
|
|
if (j < refs->count) {
|
|
/* Duplicates were found - reallocate list */
|
|
size_t size = sizeof(*refs) + j*sizeof(struct object *);
|
|
refs->count = j;
|
|
refs = xrealloc(refs, size);
|
|
}
|
|
|
|
for (i = 0; i < refs->count; i++)
|
|
refs->ref[i]->used = 1;
|
|
add_object_refs(obj, refs);
|
|
}
|
|
|
|
void mark_reachable(struct object *obj, unsigned int mask)
|
|
{
|
|
const struct object_refs *refs;
|
|
|
|
if (!track_object_refs)
|
|
die("cannot do reachability with object refs turned off");
|
|
/* If we've been here already, don't bother */
|
|
if (obj->flags & mask)
|
|
return;
|
|
obj->flags |= mask;
|
|
refs = lookup_object_refs(obj);
|
|
if (refs) {
|
|
unsigned i;
|
|
for (i = 0; i < refs->count; i++)
|
|
mark_reachable(refs->ref[i], mask);
|
|
}
|
|
}
|
|
|
|
|
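For context, a hedged sketch of how a caller such as git-fsck-objects might use this API: record an object's outgoing references with alloc_object_refs()/set_object_refs(), then flag everything reachable with mark_reachable(). This is not taken from the git tree; the REACHABLE bit, the check_reachability() helper, and the "root"/"child_a"/"child_b" objects are hypothetical names for illustration, and it assumes object.h declares track_object_refs and these functions.

	#include "cache.h"
	#include "object.h"

	#define REACHABLE 0x0001	/* hypothetical flag bit, for this sketch only */

	static void check_reachability(struct object *root,
				       struct object *child_a, struct object *child_b)
	{
		struct object_refs *refs;

		track_object_refs = 1;		/* mark_reachable() die()s if this is off */

		/* Record that "root" refers to two already-parsed objects. */
		refs = alloc_object_refs(2);
		refs->ref[0] = child_a;
		refs->ref[1] = child_b;
		set_object_refs(root, refs);	/* sorts, drops duplicates, installs */

		/* Sets the REACHABLE bit on root and, via its refs, on both children. */
		mark_reachable(root, REACHABLE);
	}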