Merge branch 'jk/4gb-idx'

The code was not prepared to deal with a pack .idx file that is
larger than 4GB.

* jk/4gb-idx:
  packfile: detect overflow in .idx file size checks
  block-sha1: take a size_t length parameter
  fsck: correctly compute checksums on idx files larger than 4GB
  use size_t to store pack .idx byte offsets
  compute pack .idx byte offsets using size_t

commit fcf26ef53a
Junio C Hamano  2020-11-25 15:24:52 -08:00
7 changed files with 19 additions and 19 deletions
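
The 4GB boundary is where 32-bit quantities wrap: unsigned int and uint32_t
everywhere, and unsigned long as well on LLP64 platforms such as Windows. As a
rough sketch of when a version-2 .idx file itself crosses that line (the
constants below mirror the on-disk format; the program is illustrative and not
part of Git):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Approximate size of a version-2 .idx: 8-byte header, 256-entry
	 * fanout table, one (hash + CRC32 + 4-byte offset) record per
	 * object, plus two trailing hashes.  The optional 8-byte
	 * large-offset table is ignored here. */
	const uint64_t hashsz = 20;			/* SHA-1 */
	const uint64_t fixed = 8 + 4 * 256 + 2 * hashsz;
	const uint64_t per_object = hashsz + 4 + 4;

	uint64_t threshold = ((UINT64_C(1) << 32) - fixed) / per_object;
	printf("a SHA-1 v2 .idx passes 4GB at roughly %llu objects\n",
	       (unsigned long long)threshold);
	return 0;
}

That works out to roughly 153 million objects, so the overflow is reachable
for very large repositories.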

@@ -203,7 +203,7 @@ void blk_SHA1_Init(blk_SHA_CTX *ctx)
 	ctx->H[4] = 0xc3d2e1f0;
 }
-void blk_SHA1_Update(blk_SHA_CTX *ctx, const void *data, unsigned long len)
+void blk_SHA1_Update(blk_SHA_CTX *ctx, const void *data, size_t len)
 {
 	unsigned int lenW = ctx->size & 63;

@@ -13,7 +13,7 @@ typedef struct {
 } blk_SHA_CTX;
 void blk_SHA1_Init(blk_SHA_CTX *ctx);
-void blk_SHA1_Update(blk_SHA_CTX *ctx, const void *dataIn, unsigned long len);
+void blk_SHA1_Update(blk_SHA_CTX *ctx, const void *dataIn, size_t len);
 void blk_SHA1_Final(unsigned char hashout[20], blk_SHA_CTX *ctx);
 #define platform_SHA_CTX blk_SHA_CTX

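The prototype change above matters because unsigned long is only 32 bits wide
on LLP64 platforms (notably 64-bit Windows), while size_t always matches the
pointer width. A minimal standalone sketch of the truncation the old signature
allowed; the length value is made up:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

int main(void)
{
	uint64_t len = (UINT64_C(1) << 32) + 42;	/* a 4GB-plus length */

	/* What the old "unsigned long len" parameter would receive on a
	 * platform where unsigned long is 32 bits: len modulo 2^32. */
	unsigned long as_ulong = (unsigned long)len;

	/* size_t is pointer-sized, so any 64-bit build keeps the full
	 * length no matter how wide its long is. */
	size_t as_size_t = (size_t)len;

	printf("unsigned long: %u bits, size_t: %u bits\n",
	       (unsigned)(sizeof(unsigned long) * CHAR_BIT),
	       (unsigned)(sizeof(size_t) * CHAR_BIT));
	printf("len=%llu  as unsigned long=%lu  as size_t=%zu\n",
	       (unsigned long long)len, as_ulong, as_size_t);
	return 0;
}
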
@@ -1597,7 +1597,7 @@ static void read_v2_anomalous_offsets(struct packed_git *p,
 	/* The address of the 4-byte offset table */
 	idx1 = (((const uint32_t *)((const uint8_t *)p->index_data + p->crc_offset))
-		+ p->num_objects /* CRC32 table */
+		+ (size_t)p->num_objects /* CRC32 table */
 		);
 	/* The address of the 8-byte offset table */

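This hunk and several below cast p->num_objects, a 32-bit object count, to
size_t before it enters offset arithmetic. Wherever that count gets multiplied
by a per-entry size, both operands would otherwise be 32-bit and the product is
computed modulo 2^32. An illustrative standalone example; the object count is
invented:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void)
{
	uint32_t num_objects = 200000000;	/* 200M objects, hypothetical */
	unsigned int hashsz = 20;		/* SHA-1 */

	/* 32-bit * 32-bit: the product wraps once it passes 4GB. */
	uint32_t wrapped = num_objects * (hashsz + 4);

	/* Casting one operand widens the whole multiplication to size_t,
	 * which is 64 bits on any build that can map a 4GB file. */
	size_t widened = (size_t)num_objects * (hashsz + 4);

	printf("wrapped product: %u\n", (unsigned)wrapped);
	printf("widened product: %zu\n", widened);
	return 0;
}

Here 200 million entries of 24 bytes each should come to about 4.8 billion
bytes, but the unwidened product comes out near 505 million.
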
@@ -236,7 +236,7 @@ static struct pack_list * pack_list_difference(const struct pack_list *A,
 static void cmp_two_packs(struct pack_list *p1, struct pack_list *p2)
 {
-	unsigned long p1_off = 0, p2_off = 0, p1_step, p2_step;
+	size_t p1_off = 0, p2_off = 0, p1_step, p2_step;
 	const unsigned char *p1_base, *p2_base;
 	struct llist_item *p1_hint = NULL, *p2_hint = NULL;
 	const unsigned int hashsz = the_hash_algo->rawsz;
@@ -280,7 +280,7 @@ static void cmp_two_packs(struct pack_list *p1, struct pack_list *p2)
 static size_t sizeof_union(struct packed_git *p1, struct packed_git *p2)
 {
 	size_t ret = 0;
-	unsigned long p1_off = 0, p2_off = 0, p1_step, p2_step;
+	size_t p1_off = 0, p2_off = 0, p1_step, p2_step;
 	const unsigned char *p1_base, *p2_base;
 	const unsigned int hashsz = the_hash_algo->rawsz;
@@ -499,7 +499,7 @@ static void scan_alt_odb_packs(void)
 static struct pack_list * add_pack(struct packed_git *p)
 {
 	struct pack_list l;
-	unsigned long off = 0, step;
+	size_t off = 0, step;
 	const unsigned char *base;
 	if (!p->pack_local && !(alt_odb || verbose))

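The p1_off, p2_off and off variables being widened here are byte cursors into
mmap'd .idx data, so they need the same treatment as the lengths above. A
hypothetical sketch of such a cursor loop; walk_entries and its parameters are
illustrative, not code from this file:

#include <stddef.h>

/* Visit each fixed-size record of a mapped index.  The cursor is a size_t
 * so it can advance past the 4GB mark even where unsigned long is only a
 * 32-bit type. */
void walk_entries(const unsigned char *base, size_t index_size,
		  size_t first_off, size_t entry_size,
		  void (*visit)(const unsigned char *entry))
{
	size_t off;	/* deliberately not unsigned long */

	for (off = first_off; off + entry_size <= index_size; off += entry_size)
		visit(base + off);
}
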
@@ -39,7 +39,7 @@ int check_pack_crc(struct packed_git *p, struct pack_window **w_curs,
 	} while (len);
 	index_crc = p->index_data;
-	index_crc += 2 + 256 + p->num_objects * (the_hash_algo->rawsz/4) + nr;
+	index_crc += 2 + 256 + (size_t)p->num_objects * (the_hash_algo->rawsz/4) + nr;
 	return data_crc != ntohl(*index_crc);
 }
@@ -164,7 +164,7 @@ static int verify_packfile(struct repository *r,
 int verify_pack_index(struct packed_git *p)
 {
-	off_t index_size;
+	size_t len;
 	const unsigned char *index_base;
 	git_hash_ctx ctx;
 	unsigned char hash[GIT_MAX_RAWSZ];
@@ -172,14 +172,14 @@ int verify_pack_index(struct packed_git *p)
 	if (open_pack_index(p))
 		return error("packfile %s index not opened", p->pack_name);
-	index_size = p->index_size;
 	index_base = p->index_data;
+	len = p->index_size - the_hash_algo->rawsz;
 	/* Verify SHA1 sum of the index file */
 	the_hash_algo->init_fn(&ctx);
-	the_hash_algo->update_fn(&ctx, index_base, (unsigned int)(index_size - the_hash_algo->rawsz));
+	the_hash_algo->update_fn(&ctx, index_base, len);
 	the_hash_algo->final_fn(hash, &ctx);
-	if (!hasheq(hash, index_base + index_size - the_hash_algo->rawsz))
+	if (!hasheq(hash, index_base + len))
 		err = error("Packfile index for %s hash mismatch",
 			    p->pack_name);
 	return err;

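The second hunk here is the checksum fix named in the shortlog: the old code
pushed the number of bytes to hash through an (unsigned int) cast, so for an
index past 4GB only the remainder modulo 2^32 was actually hashed and a
perfectly healthy file would be reported as a mismatch. A small standalone
illustration; the file size is invented:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void)
{
	uint64_t index_size = UINT64_C(4437000000);	/* a ~4.1GB .idx */
	unsigned int rawsz = 20;			/* trailing SHA-1 */

	/* Old: the length was cast down to unsigned int, silently
	 * dropping everything above 4GB. */
	unsigned int truncated = (unsigned int)(index_size - rawsz);

	/* New: the length stays a size_t all the way into the
	 * (now size_t-taking) hash update function. */
	size_t full = (size_t)(index_size - rawsz);

	printf("bytes hashed before the fix: %u\n", truncated);
	printf("bytes hashed after the fix:  %zu\n", full);
	return 0;
}
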
@@ -130,7 +130,7 @@ static void create_pack_revindex(struct packed_git *p)
 	if (p->index_version > 1) {
 		const uint32_t *off_32 =
-			(uint32_t *)(index + 8 + p->num_objects * (hashsz + 4));
+			(uint32_t *)(index + 8 + (size_t)p->num_objects * (hashsz + 4));
 		const uint32_t *off_64 = off_32 + p->num_objects;
 		for (i = 0; i < num_ent; i++) {
 			const uint32_t off = ntohl(*off_32++);

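For reference, the table positions this hunk (and nth_packed_object_offset
further down) computes can be spelled out as plain size_t arithmetic. This is
only a sketch of the v2 .idx layout; idx_v2_tables is illustrative and not
code from Git:

#include <stdint.h>
#include <stddef.h>

struct idx_v2_layout {
	size_t off32;	/* table of 4-byte object offsets */
	size_t off64;	/* 8-byte entries for offsets >= 2^31 */
};

/* Byte offsets measured from the start of the mapped .idx file:
 * 8-byte header, 256 * 4-byte fanout, then per-object hash + CRC32. */
struct idx_v2_layout idx_v2_tables(uint32_t num_objects, unsigned int hashsz)
{
	struct idx_v2_layout l;
	l.off32 = 8 + 4 * 256 + (size_t)num_objects * (hashsz + 4);
	l.off64 = l.off32 + (size_t)num_objects * 4;
	return l;
}
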
@@ -148,7 +148,7 @@ int load_idx(const char *path, const unsigned int hashsz, void *idx_map,
 		 * - hash of the packfile
 		 * - file checksum
 		 */
-		if (idx_size != 4 * 256 + nr * (hashsz + 4) + hashsz + hashsz)
+		if (idx_size != st_add(4 * 256 + hashsz + hashsz, st_mult(nr, hashsz + 4)))
 			return error("wrong index v1 file size in %s", path);
 	} else if (version == 2) {
 		/*
@@ -164,10 +164,10 @@ int load_idx(const char *path, const unsigned int hashsz, void *idx_map,
 		 * variable sized table containing 8-byte entries
 		 * for offsets larger than 2^31.
 		 */
-		unsigned long min_size = 8 + 4*256 + nr*(hashsz + 4 + 4) + hashsz + hashsz;
-		unsigned long max_size = min_size;
+		size_t min_size = st_add(8 + 4*256 + hashsz + hashsz, st_mult(nr, hashsz + 4 + 4));
+		size_t max_size = min_size;
 		if (nr)
-			max_size += (nr - 1)*8;
+			max_size = st_add(max_size, st_mult(nr - 1, 8));
 		if (idx_size < min_size || idx_size > max_size)
 			return error("wrong index v2 file size in %s", path);
 		if (idx_size != min_size &&
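
st_add() and st_mult() are Git's overflow-checked size_t helpers: rather than
return a wrapped value they die(), which is what lets the rewritten size
checks above be trusted for arbitrarily large nr. A standalone approximation
of the same idea; checked_add and checked_mult are stand-ins, not Git's
implementation:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Refuse to return a sum or product that wrapped around SIZE_MAX. */
static size_t checked_mult(size_t a, size_t b)
{
	if (b && a > SIZE_MAX / b) {
		fprintf(stderr, "fatal: size_t overflow: %zu * %zu\n", a, b);
		exit(128);
	}
	return a * b;
}

static size_t checked_add(size_t a, size_t b)
{
	if (b > SIZE_MAX - a) {
		fprintf(stderr, "fatal: size_t overflow: %zu + %zu\n", a, b);
		exit(128);
	}
	return a + b;
}

int main(void)
{
	/* The v2 minimum-size check from the hunk above, spelled out with
	 * the checked helpers (hashsz and nr are made-up values). */
	size_t hashsz = 20, nr = 200000000;
	size_t min_size = checked_add(8 + 4 * 256 + hashsz + hashsz,
				      checked_mult(nr, hashsz + 4 + 4));
	printf("min_size = %zu\n", min_size);
	return 0;
}
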
@@ -1933,14 +1933,14 @@ off_t nth_packed_object_offset(const struct packed_git *p, uint32_t n)
 	const unsigned int hashsz = the_hash_algo->rawsz;
 	index += 4 * 256;
 	if (p->index_version == 1) {
-		return ntohl(*((uint32_t *)(index + (hashsz + 4) * n)));
+		return ntohl(*((uint32_t *)(index + (hashsz + 4) * (size_t)n)));
 	} else {
 		uint32_t off;
-		index += 8 + p->num_objects * (hashsz + 4);
+		index += 8 + (size_t)p->num_objects * (hashsz + 4);
 		off = ntohl(*((uint32_t *)(index + 4 * n)));
 		if (!(off & 0x80000000))
 			return off;
-		index += p->num_objects * 4 + (off & 0x7fffffff) * 8;
+		index += (size_t)p->num_objects * 4 + (off & 0x7fffffff) * 8;
 		check_pack_index_ptr(p, index);
 		return get_be64(index);
 	}
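
Putting the casts in context, the v2 offset lookup that
nth_packed_object_offset performs has roughly this shape when written as a
standalone sketch over a plain byte buffer; be32 and be64 are local stand-ins
for the ntohl()/get_be64() calls above, and bounds checking is omitted:

#include <stdint.h>
#include <stddef.h>

static uint32_t be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static uint64_t be64(const unsigned char *p)
{
	return ((uint64_t)be32(p) << 32) | be32(p + 4);
}

/* idx points at the mapped v2 .idx file, n is the object's position in
 * sorted order.  Every offset term is widened to size_t so nothing wraps
 * for an index larger than 4GB. */
uint64_t v2_object_offset(const unsigned char *idx, uint32_t num_objects,
			  unsigned int hashsz, uint32_t n)
{
	/* Skip the header, fanout and (hash, CRC32) tables. */
	const unsigned char *off32 =
		idx + 8 + 4 * 256 + (size_t)num_objects * (hashsz + 4);
	uint32_t off = be32(off32 + 4 * (size_t)n);

	if (!(off & 0x80000000))
		return off;	/* small offset, stored directly */

	/* High bit set: the low 31 bits index the 8-byte offset table
	 * that follows the 4-byte one. */
	return be64(off32 + (size_t)num_objects * 4 +
		    (size_t)(off & 0x7fffffff) * 8);
}

Offsets below 2^31 are stored directly in the 4-byte table; larger ones set
the high bit and point into the trailing 8-byte table, which is why the final
index adjustment above also has to be done in size_t.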