2005-06-29 09:51:27 +00:00
|
|
|
#include "cache.h"
|
2018-04-25 18:21:04 +00:00
|
|
|
#include "repository.h"
|
2005-06-29 09:51:27 +00:00
|
|
|
#include "pack.h"
|
2008-02-28 05:25:19 +00:00
|
|
|
#include "pack-revindex.h"
|
2011-11-07 02:59:26 +00:00
|
|
|
#include "progress.h"
|
2017-08-18 22:20:19 +00:00
|
|
|
#include "packfile.h"
|
2018-03-23 17:20:59 +00:00
|
|
|
#include "object-store.h"
|
2005-06-29 09:51:27 +00:00
|
|
|
|
2011-03-16 07:08:34 +00:00
|
|
|
/*
 * One object entry lifted out of a pack index, used to drive
 * pack verification in pack-offset order.
 */
struct idx_entry {
	/* Byte offset of the object's data within the pack file. */
	off_t offset;
	/*
	 * Object name, viewed either as a raw hash pointer or as a
	 * struct object_id pointer over the same bytes.
	 */
	union idx_entry_object {
		const unsigned char *hash;
		struct object_id *oid;
	} oid;
	/* Position of this object in the index (its original slot). */
	unsigned int nr;
};
|
|
|
|
|
|
|
|
static int compare_entries(const void *e1, const void *e2)
|
|
|
|
{
|
|
|
|
const struct idx_entry *entry1 = e1;
|
|
|
|
const struct idx_entry *entry2 = e2;
|
|
|
|
if (entry1->offset < entry2->offset)
|
|
|
|
return -1;
|
|
|
|
if (entry1->offset > entry2->offset)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2008-06-25 03:19:02 +00:00
|
|
|
/*
 * Recompute the CRC32 of one object's compressed bytes in the pack
 * and compare it against the CRC recorded in the pack's v2 index.
 *
 * offset/len delimit the object's span in the pack file; nr is the
 * object's slot in the index.  Returns 0 on match, non-zero on
 * mismatch.
 */
int check_pack_crc(struct packed_git *p, struct pack_window **w_curs,
		   off_t offset, off_t len, unsigned int nr)
{
	const uint32_t *stored;
	uint32_t actual = crc32(0, NULL, 0);

	/*
	 * Feed [offset, offset+len) through crc32() one mapped pack
	 * window at a time; use_pack() tells us how much is available
	 * at each position.
	 */
	do {
		unsigned long mapped;
		void *win = use_pack(p, w_curs, offset, &mapped);

		if (mapped > len)
			mapped = len;
		actual = crc32(actual, win, mapped);
		offset += mapped;
		len -= mapped;
	} while (len);

	/*
	 * Locate slot nr in the index's CRC table: skip the 2-word
	 * header, the 256-entry fanout, and the object name table
	 * (rawsz bytes, i.e. rawsz/4 words, per object).
	 */
	stored = p->index_data;
	stored += 2 + 256 + p->num_objects * (the_hash_algo->rawsz / 4) + nr;

	/* CRCs in the index are stored in network byte order. */
	return actual != ntohl(*stored);
}
|
|
|
|
|
2018-11-10 05:49:07 +00:00
|
|
|
/*
 * Verify one pack file against its index:
 *
 *   1. recompute the whole-pack checksum and compare it to the pack's
 *      trailer and to the copy stored in the index trailer;
 *   2. for each object, check its index CRC (v2 indexes only), unpack
 *      it (or stream it, for big blobs), verify its object name, and
 *      hand it to the optional callback "fn".
 *
 * Errors are accumulated; the function keeps going so callers like
 * fsck can report everything wrong with a pack.  Returns 0 if all
 * checks pass, non-zero otherwise.
 */
static int verify_packfile(struct repository *r,
			   struct packed_git *p,
			   struct pack_window **w_curs,
			   verify_fn fn,
			   struct progress *progress, uint32_t base_count)
{
	off_t index_size = p->index_size;
	const unsigned char *index_base = p->index_data;
	git_hash_ctx ctx;
	unsigned char hash[GIT_MAX_RAWSZ], *pack_sig;
	off_t offset = 0, pack_sig_ofs = 0;
	uint32_t nr_objects, i;
	int err = 0;
	struct idx_entry *entries;

	/*
	 * Open the pack up front so we can return a gentle error;
	 * otherwise the first use_pack() below would die() on an
	 * unopenable pack.
	 */
	if (!is_pack_valid(p))
		return error("packfile %s cannot be accessed", p->pack_name);

	/* Hash the pack contents, excluding the trailing checksum. */
	the_hash_algo->init_fn(&ctx);
	do {
		unsigned long remaining;
		unsigned char *in = use_pack(p, w_curs, offset, &remaining);
		offset += remaining;
		/*
		 * The trailer starts rawsz bytes before the end; compute
		 * it once, after the first use_pack() has established
		 * p->pack_size.
		 */
		if (!pack_sig_ofs)
			pack_sig_ofs = p->pack_size - the_hash_algo->rawsz;
		/* Don't hash the trailer itself. */
		if (offset > pack_sig_ofs)
			remaining -= (unsigned int)(offset - pack_sig_ofs);
		the_hash_algo->update_fn(&ctx, in, remaining);
	} while (offset < pack_sig_ofs);
	the_hash_algo->final_fn(hash, &ctx);
	pack_sig = use_pack(p, w_curs, pack_sig_ofs, NULL);
	if (!hasheq(hash, pack_sig))
		err = error("%s pack checksum mismatch",
			    p->pack_name);
	/*
	 * The index trailer is two hashes (pack checksum copy, then the
	 * index's own checksum); hexsz == 2 * rawsz, so index_size -
	 * hexsz points at the pack checksum copy.
	 */
	if (!hasheq(index_base + index_size - the_hash_algo->hexsz, pack_sig))
		err = error("%s pack checksum does not match its index",
			    p->pack_name);
	unuse_pack(w_curs);

	/* Make sure everything reachable from idx is valid. Since we
	 * have verified that nr_objects matches between idx and pack,
	 * we do not do scan-streaming check on the pack file.
	 */
	nr_objects = p->num_objects;
	/* One extra sentinel entry so entries[i+1].offset is always valid. */
	ALLOC_ARRAY(entries, nr_objects + 1);
	entries[nr_objects].offset = pack_sig_ofs;
	/* first sort entries by pack offset, since unpacking them is more efficient that way */
	for (i = 0; i < nr_objects; i++) {
		entries[i].oid.hash = nth_packed_object_sha1(p, i);
		if (!entries[i].oid.hash)
			die("internal error pack-check nth-packed-object");
		entries[i].offset = nth_packed_object_offset(p, i);
		entries[i].nr = i;
	}
	QSORT(entries, nr_objects, compare_entries);

	for (i = 0; i < nr_objects; i++) {
		void *data;
		enum object_type type;
		unsigned long size;
		off_t curpos;
		int data_valid;

		/* v2 indexes carry a per-object CRC of the packed data. */
		if (p->index_version > 1) {
			off_t offset = entries[i].offset;
			/* Sorted order makes the next entry the end bound. */
			off_t len = entries[i+1].offset - offset;
			unsigned int nr = entries[i].nr;
			if (check_pack_crc(p, w_curs, offset, len, nr))
				err = error("index CRC mismatch for object %s "
					    "from %s at offset %"PRIuMAX"",
					    oid_to_hex(entries[i].oid.oid),
					    p->pack_name, (uintmax_t)offset);
		}

		curpos = entries[i].offset;
		type = unpack_object_header(p, w_curs, &curpos, &size);
		unuse_pack(w_curs);

		if (type == OBJ_BLOB && big_file_threshold <= size) {
			/*
			 * Let check_object_signature() check it with
			 * the streaming interface; no point slurping
			 * the data in-core only to discard.
			 */
			data = NULL;
			data_valid = 0;
		} else {
			data = unpack_entry(r, p, entries[i].offset, &type, &size);
			data_valid = 1;
		}

		if (data_valid && !data)
			err = error("cannot unpack %s from %s at offset %"PRIuMAX"",
				    oid_to_hex(entries[i].oid.oid), p->pack_name,
				    (uintmax_t)entries[i].offset);
		else if (check_object_signature(entries[i].oid.oid, data, size, type_name(type)))
			err = error("packed %s from %s is corrupt",
				    oid_to_hex(entries[i].oid.oid), p->pack_name);
		else if (fn) {
			int eaten = 0;
			err |= fn(entries[i].oid.oid, type, size, data, &eaten);
			/* The callback took ownership; don't free below. */
			if (eaten)
				data = NULL;
		}
		/* Throttle progress updates to every 1024 objects. */
		if (((base_count + i) & 1023) == 0)
			display_progress(progress, base_count + i);
		free(data);

	}
	display_progress(progress, base_count + i);
	free(entries);

	return err;
}
|
|
|
|
|
2010-04-19 14:23:07 +00:00
|
|
|
int verify_pack_index(struct packed_git *p)
|
2005-06-29 09:51:27 +00:00
|
|
|
{
|
2007-05-26 05:24:19 +00:00
|
|
|
off_t index_size;
|
|
|
|
const unsigned char *index_base;
|
2018-02-01 02:18:43 +00:00
|
|
|
git_hash_ctx ctx;
|
|
|
|
unsigned char hash[GIT_MAX_RAWSZ];
|
2008-05-29 21:34:50 +00:00
|
|
|
int err = 0;
|
2005-06-29 09:51:27 +00:00
|
|
|
|
2007-05-26 05:24:19 +00:00
|
|
|
if (open_pack_index(p))
|
|
|
|
return error("packfile %s index not opened", p->pack_name);
|
|
|
|
index_size = p->index_size;
|
|
|
|
index_base = p->index_data;
|
|
|
|
|
2005-06-29 09:51:27 +00:00
|
|
|
/* Verify SHA1 sum of the index file */
|
2018-02-01 02:18:43 +00:00
|
|
|
the_hash_algo->init_fn(&ctx);
|
|
|
|
the_hash_algo->update_fn(&ctx, index_base, (unsigned int)(index_size - the_hash_algo->rawsz));
|
|
|
|
the_hash_algo->final_fn(hash, &ctx);
|
2018-08-28 21:22:52 +00:00
|
|
|
if (!hasheq(hash, index_base + index_size - the_hash_algo->rawsz))
|
2018-02-01 02:18:43 +00:00
|
|
|
err = error("Packfile index for %s hash mismatch",
|
2005-07-01 00:15:39 +00:00
|
|
|
p->pack_name);
|
2010-04-19 14:23:07 +00:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2018-11-10 05:49:07 +00:00
|
|
|
/*
 * Verify a pack and its index together: first the .idx trailer, then
 * the pack contents object by object.  "fn" (may be NULL) is invoked
 * for each unpacked object; progress is reported starting from
 * base_count.  Returns 0 when everything checks out.
 */
int verify_pack(struct repository *r, struct packed_git *p, verify_fn fn,
		struct progress *progress, uint32_t base_count)
{
	struct pack_window *w_curs = NULL;
	int rc = 0;

	rc |= verify_pack_index(p);
	/* Without usable index data we cannot walk the pack at all. */
	if (!p->index_data)
		return -1;

	rc |= verify_packfile(r, p, &w_curs, fn, progress, base_count);
	unuse_pack(&w_curs);

	return rc;
}
|