git/builtin/unpack-objects.c


#include "builtin.h"
#include "cache.h"
#include "bulk-checkin.h"
#include "config.h"
#include "environment.h"
#include "gettext.h"
#include "git-zlib.h"
#include "hex.h"
#include "object-store.h"
#include "object.h"
#include "delta.h"
#include "pack.h"
#include "blob.h"
#include "commit.h"
#include "replace-object.h"
#include "tag.h"
#include "tree.h"
#include "tree-walk.h"
#include "progress.h"
#include "decorate.h"
#include "fsck.h"
static int dry_run, quiet, recover, has_errors, strict;
static const char unpack_usage[] = "git unpack-objects [-n] [-q] [-r] [--strict]";
/* We always read in 4kB chunks. */
static unsigned char buffer[4096];
static unsigned int offset, len;
static off_t consumed_bytes;
static off_t max_input_size;
static git_hash_ctx ctx;
static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
static struct progress *progress;
/*
 * When running under --strict mode, objects whose reachability is
 * suspect are kept in core without being written to the object
 * store.
*/
struct obj_buffer {
char *buffer;
unsigned long size;
};
static struct decoration obj_decorate;
static struct obj_buffer *lookup_object_buffer(struct object *base)
{
return lookup_decoration(&obj_decorate, base);
}
static void add_object_buffer(struct object *object, char *buffer, unsigned long size)
{
struct obj_buffer *obj;
CALLOC_ARRAY(obj, 1);
obj->buffer = buffer;
obj->size = size;
if (add_decoration(&obj_decorate, object, obj))
die("object %s tried to add buffer twice!", oid_to_hex(&object->oid));
}
/*
* Make sure at least "min" bytes are available in the buffer, and
* return the pointer to the buffer.
*/
static void *fill(int min)
{
if (min <= len)
return buffer + offset;
if (min > sizeof(buffer))
die("cannot fill %d bytes", min);
if (offset) {
the_hash_algo->update_fn(&ctx, buffer, offset);
memmove(buffer, buffer + offset, len);
offset = 0;
}
do {
ssize_t ret = xread(0, buffer + len, sizeof(buffer) - len);
if (ret <= 0) {
if (!ret)
die("early EOF");
die_errno("read error on input");
}
len += ret;
} while (len < min);
return buffer;
}
static void use(int bytes)
{
if (bytes > len)
die("used more bytes than were available");
len -= bytes;
offset += bytes;
/* make sure off_t is sufficiently large not to wrap */
if (signed_add_overflows(consumed_bytes, bytes))
die("pack too large for current definition of off_t");
consumed_bytes += bytes;
if (max_input_size && consumed_bytes > max_input_size)
die(_("pack exceeds maximum allowed size"));
display_throughput(progress, consumed_bytes);
}
/*
 * Decompress a zstream from the standard input into a newly
 * allocated buffer of the specified size and return the buffer.
 * The caller is responsible for freeing the returned buffer.
 *
 * In dry_run mode, however, "get_data()" is only used to check the
 * integrity of the data, and the returned buffer is not used at all.
 * Therefore, in dry_run mode, "get_data()" reuses a small allocated
 * buffer to hold the temporary zstream output, releases it, and
 * returns NULL instead of garbage data.
*/
static void *get_data(unsigned long size)
{
git_zstream stream;
unsigned long bufsize = dry_run && size > 8192 ? 8192 : size;
void *buf = xmallocz(bufsize);
memset(&stream, 0, sizeof(stream));
stream.next_out = buf;
stream.avail_out = bufsize;
stream.next_in = fill(1);
stream.avail_in = len;
git_inflate_init(&stream);
for (;;) {
int ret = git_inflate(&stream, 0);
use(len - stream.avail_in);
if (stream.total_out == size && ret == Z_STREAM_END)
break;
if (ret != Z_OK) {
error("inflate returned %d", ret);
FREE_AND_NULL(buf);
if (!recover)
exit(1);
has_errors = 1;
break;
}
stream.next_in = fill(1);
stream.avail_in = len;
if (dry_run) {
/* reuse the buffer in dry_run mode */
stream.next_out = buf;
stream.avail_out = bufsize > size - stream.total_out ?
size - stream.total_out :
bufsize;
}
}
git_inflate_end(&stream);
if (dry_run)
FREE_AND_NULL(buf);
return buf;
}
struct delta_info {
struct object_id base_oid;
unsigned nr;
off_t base_offset;
unsigned long size;
void *delta;
struct delta_info *next;
};
static struct delta_info *delta_list;
static void add_delta_to_list(unsigned nr, const struct object_id *base_oid,
off_t base_offset,
void *delta, unsigned long size)
{
struct delta_info *info = xmalloc(sizeof(*info));
oidcpy(&info->base_oid, base_oid);
info->base_offset = base_offset;
info->size = size;
info->delta = delta;
info->nr = nr;
info->next = delta_list;
delta_list = info;
}
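/*
 * A delta whose base has not been seen yet cannot be resolved
 * immediately.  It is queued on delta_list, keyed by the base's
 * object id (REF_DELTA) or by the base's offset in the pack
 * (OFS_DELTA), and resolved later by added_object() once the base
 * object becomes available.
 */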
struct obj_info {
off_t offset;
struct object_id oid;
struct object *obj;
};
/* Remember to update object flag allocation in object.h */
#define FLAG_OPEN (1u<<20)
#define FLAG_WRITTEN (1u<<21)
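/*
 * FLAG_OPEN marks an object that is being held in core (its buffer is
 * retained for later fsck and deferred writing); FLAG_WRITTEN marks an
 * object that has been written to the object store, or verified to
 * exist there already.
 */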
static struct obj_info *obj_list;
static unsigned nr_objects;
/*
 * Called only from check_object() after it has verified that this
 * object is OK.
*/
static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf)
{
struct object_id oid;
if (write_object_file(obj_buf->buffer, obj_buf->size,
obj->type, &oid) < 0)
die("failed to write object %s", oid_to_hex(&obj->oid));
obj->flags |= FLAG_WRITTEN;
}
/*
 * At the very end of processing, write_rest() scans the objects
 * that have reachability requirements and calls this function.
 * It verifies each object's reachability and validity recursively
 * and writes it out.
*/
static int check_object(struct object *obj, enum object_type type,
void *data, struct fsck_options *options)
{
struct obj_buffer *obj_buf;
if (!obj)
return 1;
if (obj->flags & FLAG_WRITTEN)
return 0;
if (type != OBJ_ANY && obj->type != type)
die("object type mismatch");
if (!(obj->flags & FLAG_OPEN)) {
unsigned long size;
int type = oid_object_info(the_repository, &obj->oid, &size);
if (type != obj->type || type <= 0)
die("object of unexpected type");
obj->flags |= FLAG_WRITTEN;
return 0;
}
obj_buf = lookup_object_buffer(obj);
if (!obj_buf)
die("Whoops! Cannot find object '%s'", oid_to_hex(&obj->oid));
if (fsck_object(obj, obj_buf->buffer, obj_buf->size, &fsck_options))
die("fsck error in packed object");
fsck_options.walk = check_object;
if (fsck_walk(obj, NULL, &fsck_options))
die("Error on reachable objects of %s", oid_to_hex(&obj->oid));
write_cached_object(obj, obj_buf);
return 0;
}
static void write_rest(void)
{
unsigned i;
for (i = 0; i < nr_objects; i++) {
if (obj_list[i].obj)
check_object(obj_list[i].obj, OBJ_ANY, NULL, NULL);
}
}
static void added_object(unsigned nr, enum object_type type,
void *data, unsigned long size);
/*
 * Write out the nr-th object from the list, now that we know its
 * contents.  Under --strict, structured objects (commits, trees and
 * tags) are buffered in-core so they can be checked at the end.
*/
static void write_object(unsigned nr, enum object_type type,
void *buf, unsigned long size)
{
if (!strict) {
if (write_object_file(buf, size, type,
&obj_list[nr].oid) < 0)
die("failed to write object");
added_object(nr, type, buf, size);
free(buf);
obj_list[nr].obj = NULL;
} else if (type == OBJ_BLOB) {
struct blob *blob;
if (write_object_file(buf, size, type,
&obj_list[nr].oid) < 0)
die("failed to write object");
added_object(nr, type, buf, size);
free(buf);
blob = lookup_blob(the_repository, &obj_list[nr].oid);
if (blob)
blob->object.flags |= FLAG_WRITTEN;
else
die("invalid blob object");
obj_list[nr].obj = NULL;
} else {
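		/*
		 * Under --strict, non-blob objects are hashed and parsed
		 * but not written yet: they are held in core via
		 * add_object_buffer() and only written by write_rest()
		 * once check_object() has validated them.
		 */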
struct object *obj;
int eaten;
hash_object_file(the_hash_algo, buf, size, type,
&obj_list[nr].oid);
added_object(nr, type, buf, size);
obj = parse_object_buffer(the_repository, &obj_list[nr].oid,
type, size, buf,
&eaten);
if (!obj)
die("invalid %s", type_name(type));
add_object_buffer(obj, buf, size);
obj->flags |= FLAG_OPEN;
obj_list[nr].obj = obj;
}
}
static void resolve_delta(unsigned nr, enum object_type type,
void *base, unsigned long base_size,
void *delta, unsigned long delta_size)
{
void *result;
unsigned long result_size;
result = patch_delta(base, base_size,
delta, delta_size,
&result_size);
if (!result)
die("failed to apply delta");
free(delta);
write_object(nr, type, result, result_size);
}
/*
 * We now know the contents of the nr-th object in the pack;
 * resolve all the deltified objects that are based on it.
*/
static void added_object(unsigned nr, enum object_type type,
void *data, unsigned long size)
{
struct delta_info **p = &delta_list;
struct delta_info *info;
while ((info = *p) != NULL) {
if (oideq(&info->base_oid, &obj_list[nr].oid) ||
info->base_offset == obj_list[nr].offset) {
*p = info->next;
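			/*
			 * resolve_delta() ends up in write_object(), which
			 * calls added_object() again and may splice more
			 * entries out of delta_list, so restart the walk
			 * from the head of the list afterwards.
			 */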
p = &delta_list;
resolve_delta(info->nr, type, data, size,
info->delta, info->size);
free(info);
continue;
}
p = &info->next;
}
}
static void unpack_non_delta_entry(enum object_type type, unsigned long size,
unsigned nr)
{
void *buf = get_data(size);
if (buf)
write_object(nr, type, buf, size);
}
struct input_zstream_data {
git_zstream *zstream;
unsigned char buf[8192];
int status;
};
static const void *feed_input_zstream(struct input_stream *in_stream,
unsigned long *readlen)
{
struct input_zstream_data *data = in_stream->data;
git_zstream *zstream = data->zstream;
void *in = fill(1);
if (in_stream->is_finished) {
*readlen = 0;
return NULL;
}
zstream->next_out = data->buf;
zstream->avail_out = sizeof(data->buf);
zstream->next_in = in;
zstream->avail_in = len;
data->status = git_inflate(zstream, 0);
in_stream->is_finished = data->status != Z_OK;
use(len - zstream->avail_in);
*readlen = sizeof(data->buf) - zstream->avail_out;
return data->buf;
}
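/*
 * Unpack a blob entry straight into a loose object, without first
 * inflating the whole thing into memory.  This path is taken for blobs
 * larger than core.bigFileThreshold (see unpack_one()): it bounds
 * memory use for huge blobs at the cost of being somewhat slower than
 * the in-core path.
 */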
static void stream_blob(unsigned long size, unsigned nr)
{
git_zstream zstream = { 0 };
struct input_zstream_data data = { 0 };
struct input_stream in_stream = {
.read = feed_input_zstream,
.data = &data,
};
struct obj_info *info = &obj_list[nr];
data.zstream = &zstream;
git_inflate_init(&zstream);
if (stream_loose_object(&in_stream, size, &info->oid))
die(_("failed to write object in stream"));
if (data.status != Z_STREAM_END)
die(_("inflate returned (%d)"), data.status);
git_inflate_end(&zstream);
if (strict) {
struct blob *blob = lookup_blob(the_repository, &info->oid);
if (!blob)
die(_("invalid blob object from stream"));
blob->object.flags |= FLAG_WRITTEN;
}
info->obj = NULL;
}
static int resolve_against_held(unsigned nr, const struct object_id *base,
void *delta_data, unsigned long delta_size)
{
struct object *obj;
struct obj_buffer *obj_buffer;
obj = lookup_object(the_repository, base);
if (!obj)
return 0;
obj_buffer = lookup_object_buffer(obj);
if (!obj_buffer)
return 0;
resolve_delta(nr, obj->type, obj_buffer->buffer,
obj_buffer->size, delta_data, delta_size);
return 1;
}
static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
unsigned nr)
{
void *delta_data, *base;
unsigned long base_size;
struct object_id base_oid;
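	/*
	 * A deltified entry names its base either by full object id
	 * (OBJ_REF_DELTA) or by a negative offset back from this
	 * entry's own position in the pack (OBJ_OFS_DELTA).
	 */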
if (type == OBJ_REF_DELTA) {
oidread(&base_oid, fill(the_hash_algo->rawsz));
use(the_hash_algo->rawsz);
delta_data = get_data(delta_size);
if (!delta_data)
return;
if (repo_has_object_file(the_repository, &base_oid))
; /* Ok we have this one */
else if (resolve_against_held(nr, &base_oid,
delta_data, delta_size))
return; /* we are done */
else {
/* cannot resolve yet --- queue it */
oidclr(&obj_list[nr].oid);
add_delta_to_list(nr, &base_oid, 0, delta_data, delta_size);
return;
}
} else {
unsigned base_found = 0;
unsigned char *pack, c;
off_t base_offset;
unsigned lo, mid, hi;
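		/*
		 * The base offset is encoded as a variable-length,
		 * big-endian sequence of 7-bit groups: the MSB of each
		 * byte says "more follows", and each continuation adds
		 * one before shifting so that shorter encodings do not
		 * overlap longer ones.  The result is how far *before*
		 * this entry the base object starts in the pack.
		 */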
pack = fill(1);
c = *pack;
use(1);
base_offset = c & 127;
while (c & 128) {
base_offset += 1;
if (!base_offset || MSB(base_offset, 7))
die("offset value overflow for delta base object");
pack = fill(1);
c = *pack;
use(1);
base_offset = (base_offset << 7) + (c & 127);
}
base_offset = obj_list[nr].offset - base_offset;
if (base_offset <= 0 || base_offset >= obj_list[nr].offset)
die("offset value out of bound for delta base object");
delta_data = get_data(delta_size);
if (!delta_data)
return;
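		/*
		 * Find the base among the entries unpacked so far by
		 * binary search on the recorded pack offsets (obj_list
		 * is naturally sorted by offset).  A null oid at that
		 * slot means the base is itself a still-unresolved delta.
		 */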
lo = 0;
hi = nr;
while (lo < hi) {
mid = lo + (hi - lo) / 2;
if (base_offset < obj_list[mid].offset) {
hi = mid;
} else if (base_offset > obj_list[mid].offset) {
lo = mid + 1;
} else {
oidcpy(&base_oid, &obj_list[mid].oid);
base_found = !is_null_oid(&base_oid);
break;
}
}
if (!base_found) {
/*
* The delta base object is itself a delta that
* has not been resolved yet.
*/
oidclr(&obj_list[nr].oid);
add_delta_to_list(nr, null_oid(), base_offset,
delta_data, delta_size);
return;
}
}
if (resolve_against_held(nr, &base_oid, delta_data, delta_size))
return;
base = repo_read_object_file(the_repository, &base_oid, &type,
&base_size);
if (!base) {
error("failed to read delta-pack base object %s",
oid_to_hex(&base_oid));
if (!recover)
exit(1);
has_errors = 1;
return;
}
resolve_delta(nr, type, base, base_size, delta_data, delta_size);
free(base);
}
static void unpack_one(unsigned nr)
{
unsigned shift;
unsigned char *pack;
unsigned long size, c;
enum object_type type;
obj_list[nr].offset = consumed_bytes;
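	/*
	 * Each entry starts with a variable-length header: bits 4-6 of
	 * the first byte give the object type, the low 4 bits are the
	 * low bits of the size, and the MSB flags continuation bytes,
	 * each contributing 7 more size bits (least significant group
	 * first).  "c" is an unsigned long rather than an unsigned char
	 * so the shifts below cannot sign-extend through an int.
	 */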
pack = fill(1);
c = *pack;
use(1);
type = (c >> 4) & 7;
size = (c & 15);
shift = 4;
while (c & 0x80) {
pack = fill(1);
c = *pack;
use(1);
size += (c & 0x7f) << shift;
shift += 7;
}
switch (type) {
case OBJ_BLOB:
if (!dry_run && size > big_file_threshold) {
stream_blob(size, nr);
return;
}
/* fallthrough */
case OBJ_COMMIT:
case OBJ_TREE:
case OBJ_TAG:
unpack_non_delta_entry(type, size, nr);
return;
case OBJ_REF_DELTA:
case OBJ_OFS_DELTA:
unpack_delta_entry(type, size, nr);
return;
default:
error("bad object type %d", type);
has_errors = 1;
if (recover)
return;
exit(1);
}
}
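/*
 * A pack stream is a 12-byte header ("PACK", a version number and the
 * number of entries), followed by that many object entries, followed
 * by a checksum of everything that came before it.  unpack_all()
 * checks the header and unpacks the entries; the trailing checksum is
 * verified by the caller, cmd_unpack_objects().
 */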
static void unpack_all(void)
{
int i;
struct pack_header *hdr = fill(sizeof(struct pack_header));
nr_objects = ntohl(hdr->hdr_entries);
if (ntohl(hdr->hdr_signature) != PACK_SIGNATURE)
die("bad pack file");
if (!pack_version_ok(hdr->hdr_version))
die("unknown pack file version %"PRIu32,
ntohl(hdr->hdr_version));
use(sizeof(struct pack_header));
if (!quiet)
progress = start_progress(_("Unpacking objects"), nr_objects);
CALLOC_ARRAY(obj_list, nr_objects);
begin_odb_transaction();
for (i = 0; i < nr_objects; i++) {
unpack_one(i);
display_progress(progress, i + 1);
}
end_odb_transaction();
stop_progress(&progress);
if (delta_list)
die("unresolved deltas left after unpacking");
}
int cmd_unpack_objects(int argc, const char **argv, const char *prefix UNUSED)
{
int i;
struct object_id oid;
read_replace_refs = 0;
git_config(git_default_config, NULL);
quiet = !isatty(2);
for (i = 1 ; i < argc; i++) {
const char *arg = argv[i];
if (*arg == '-') {
if (!strcmp(arg, "-n")) {
dry_run = 1;
continue;
}
if (!strcmp(arg, "-q")) {
quiet = 1;
continue;
}
if (!strcmp(arg, "-r")) {
recover = 1;
continue;
}
if (!strcmp(arg, "--strict")) {
strict = 1;
continue;
}
if (skip_prefix(arg, "--strict=", &arg)) {
strict = 1;
fsck_set_msg_types(&fsck_options, arg);
continue;
}
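			/*
			 * --pack_header=<version>,<entries> is passed by
			 * callers (e.g. "git receive-pack") that have
			 * already read the pack header off the incoming
			 * stream; reassemble it into the input buffer so
			 * that unpack_all() can parse it as if it had
			 * arrived on stdin.
			 */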
if (starts_with(arg, "--pack_header=")) {
struct pack_header *hdr;
char *c;
hdr = (struct pack_header *)buffer;
hdr->hdr_signature = htonl(PACK_SIGNATURE);
hdr->hdr_version = htonl(strtoul(arg + 14, &c, 10));
if (*c != ',')
die("bad %s", arg);
hdr->hdr_entries = htonl(strtoul(c + 1, &c, 10));
if (*c)
die("bad %s", arg);
len = sizeof(*hdr);
continue;
}
if (skip_prefix(arg, "--max-input-size=", &arg)) {
max_input_size = strtoumax(arg, NULL, 10);
continue;
}
usage(unpack_usage);
}
		/* We don't take any non-flag arguments now... Maybe some day */
usage(unpack_usage);
}
the_hash_algo->init_fn(&ctx);
unpack_all();
the_hash_algo->update_fn(&ctx, buffer, offset);
the_hash_algo->final_oid_fn(&oid, &ctx);
if (strict) {
write_rest();
if (fsck_finish(&fsck_options))
die(_("fsck error in pack objects"));
}
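	/*
	 * The hash accumulated over everything we consumed must match
	 * the trailing checksum stored at the end of the pack.
	 */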
if (!hasheq(fill(the_hash_algo->rawsz), oid.hash))
die("final sha1 did not match");
use(the_hash_algo->rawsz);
/* Write the last part of the buffer to stdout */
while (len) {
int ret = xwrite(1, buffer + offset, len);
if (ret <= 0)
break;
len -= ret;
offset += ret;
}
/* All done */
return has_errors;
}