#include "builtin.h"
#include "bulk-checkin.h"
#include "config.h"
#include "environment.h"
#include "gettext.h"
#include "git-zlib.h"
#include "hex.h"
#include "object-store-ll.h"
#include "object.h"
#include "delta.h"
#include "pack.h"
#include "blob.h"
#include "commit.h"
#include "replace-object.h"
#include "strbuf.h"
#include "tag.h"
#include "tree.h"
#include "tree-walk.h"
#include "progress.h"
#include "decorate.h"
#include "fsck.h"

static int dry_run, quiet, recover, has_errors, strict;
static const char unpack_usage[] = "git unpack-objects [-n] [-q] [-r] [--strict]";

/* We always read in 4kB chunks. */
static unsigned char buffer[4096];
static unsigned int offset, len;
static off_t consumed_bytes;
static off_t max_input_size;
static git_hash_ctx ctx;
static struct fsck_options fsck_options = FSCK_OPTIONS_STRICT;
static struct progress *progress;

/*
 * When running under --strict mode, objects whose reachability is
 * suspect are kept in core without being written to the object
 * store.
 */
struct obj_buffer {
	char *buffer;
	unsigned long size;
};

static struct decoration obj_decorate;

static struct obj_buffer *lookup_object_buffer(struct object *base)
{
	return lookup_decoration(&obj_decorate, base);
}

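/*
 * Remember the buffer that holds a held object's contents, so that
 * check_object() can fsck it and write it out later.
 */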
static void add_object_buffer(struct object *object, char *buffer, unsigned long size)
{
	struct obj_buffer *obj;

	CALLOC_ARRAY(obj, 1);
	obj->buffer = buffer;
	obj->size = size;
	if (add_decoration(&obj_decorate, object, obj))
		die("object %s tried to add buffer twice!", oid_to_hex(&object->oid));
}

/*
 * Make sure at least "min" bytes are available in the buffer, and
 * return the pointer to the buffer.
 */
static void *fill(int min)
{
	if (min <= len)
		return buffer + offset;
	if (min > sizeof(buffer))
		die("cannot fill %d bytes", min);
	if (offset) {
		the_hash_algo->update_fn(&ctx, buffer, offset);
		memmove(buffer, buffer + offset, len);
		offset = 0;
	}
	do {
		ssize_t ret = xread(0, buffer + len, sizeof(buffer) - len);
		if (ret <= 0) {
			if (!ret)
				die("early EOF");
			die_errno("read error on input");
		}
		len += ret;
	} while (len < min);
	return buffer;
}
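
/*
 * Mark "bytes" bytes at the head of the fill() window as consumed,
 * keeping track of the total pack size read so far.
 */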
static void use(int bytes)
{
	if (bytes > len)
		die("used more bytes than were available");
	len -= bytes;
	offset += bytes;

	/* make sure off_t is sufficiently large not to wrap */
	if (signed_add_overflows(consumed_bytes, bytes))
		die("pack too large for current definition of off_t");
	consumed_bytes += bytes;
	if (max_input_size && consumed_bytes > max_input_size)
		die(_("pack exceeds maximum allowed size"));
	display_throughput(progress, consumed_bytes);
}

/*
 * Decompress a zstream from the standard input into a newly
 * allocated buffer of the specified size and return the buffer.
 * The caller is responsible for freeing the returned buffer.
 *
 * In dry_run mode, "get_data()" is only used to check the
 * integrity of the data, and the returned buffer is not used at all.
 * Therefore, in dry_run mode, "get_data()" frees the small
 * allocated buffer that is reused to hold temporary zstream output
 * and returns NULL instead of returning garbage data.
 */
static void *get_data(unsigned long size)
{
	git_zstream stream;
	unsigned long bufsize = dry_run && size > 8192 ? 8192 : size;
	void *buf = xmallocz(bufsize);

	memset(&stream, 0, sizeof(stream));

	stream.next_out = buf;
	stream.avail_out = bufsize;
	stream.next_in = fill(1);
	stream.avail_in = len;
	git_inflate_init(&stream);

	for (;;) {
		int ret = git_inflate(&stream, 0);
		use(len - stream.avail_in);
		if (stream.total_out == size && ret == Z_STREAM_END)
			break;
		if (ret != Z_OK) {
			error("inflate returned %d", ret);
			FREE_AND_NULL(buf);
			if (!recover)
				exit(1);
			has_errors = 1;
			break;
		}
		stream.next_in = fill(1);
		stream.avail_in = len;
		if (dry_run) {
			/* reuse the buffer in dry_run mode */
			stream.next_out = buf;
			stream.avail_out = bufsize > size - stream.total_out ?
						size - stream.total_out :
						bufsize;
		}
	}
	git_inflate_end(&stream);
	if (dry_run)
		FREE_AND_NULL(buf);
	return buf;
}
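
/*
 * A delta whose base has not been resolved yet, queued on delta_list;
 * the base is identified by object id (REF_DELTA) or by pack offset
 * (OFS_DELTA).
 */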
struct delta_info {
	struct object_id base_oid;
	unsigned nr;
	off_t base_offset;
	unsigned long size;
	void *delta;
	struct delta_info *next;
};

static struct delta_info *delta_list;

static void add_delta_to_list(unsigned nr, const struct object_id *base_oid,
			      off_t base_offset,
			      void *delta, unsigned long size)
{
	struct delta_info *info = xmalloc(sizeof(*info));

	oidcpy(&info->base_oid, base_oid);
	info->base_offset = base_offset;
	info->size = size;
	info->delta = delta;
	info->nr = nr;
	info->next = delta_list;
	delta_list = info;
}

struct obj_info {
	off_t offset;
	struct object_id oid;
	struct object *obj;
};

/* Remember to update object flag allocation in object.h */
#define FLAG_OPEN (1u<<20)
#define FLAG_WRITTEN (1u<<21)

static struct obj_info *obj_list;
static unsigned nr_objects;

/*
 * Called only from check_object() after it verified this object
 * is OK.
 */
static void write_cached_object(struct object *obj, struct obj_buffer *obj_buf)
{
	struct object_id oid;

	if (write_object_file(obj_buf->buffer, obj_buf->size,
			      obj->type, &oid) < 0)
		die("failed to write object %s", oid_to_hex(&obj->oid));
	obj->flags |= FLAG_WRITTEN;
}

/*
 * At the very end of processing, write_rest() scans the objects
 * that have reachability requirements and calls this function.
 * Verify each such object's reachability and validity recursively
 * and write it out.
 */
static int check_object(struct object *obj, enum object_type type,
			void *data UNUSED,
			struct fsck_options *options UNUSED)
{
	struct obj_buffer *obj_buf;

	if (!obj)
		return 1;

	if (obj->flags & FLAG_WRITTEN)
		return 0;

	if (type != OBJ_ANY && obj->type != type)
		die("object type mismatch");

	if (!(obj->flags & FLAG_OPEN)) {
		unsigned long size;
		int type = oid_object_info(the_repository, &obj->oid, &size);
		if (type != obj->type || type <= 0)
			die("object of unexpected type");
		obj->flags |= FLAG_WRITTEN;
		return 0;
	}

	obj_buf = lookup_object_buffer(obj);
	if (!obj_buf)
		die("Whoops! Cannot find object '%s'", oid_to_hex(&obj->oid));
	if (fsck_object(obj, obj_buf->buffer, obj_buf->size, &fsck_options))
		die("fsck error in packed object");
	fsck_options.walk = check_object;
	if (fsck_walk(obj, NULL, &fsck_options))
		die("Error on reachable objects of %s", oid_to_hex(&obj->oid));
	write_cached_object(obj, obj_buf);
	return 0;
}
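
/*
 * Under --strict, flush out the objects that were held in core for
 * reachability checking once the whole pack has been processed.
 */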
static void write_rest(void)
{
	unsigned i;
	for (i = 0; i < nr_objects; i++) {
		if (obj_list[i].obj)
			check_object(obj_list[i].obj, OBJ_ANY, NULL, NULL);
	}
}

static void added_object(unsigned nr, enum object_type type,
			 void *data, unsigned long size);

/*
 * Write out the nr-th object from the list, now that we know its
 * contents. Under --strict, this buffers structured objects in-core,
 * to be checked at the end.
 */
static void write_object(unsigned nr, enum object_type type,
			 void *buf, unsigned long size)
{
	if (!strict) {
		if (write_object_file(buf, size, type,
				      &obj_list[nr].oid) < 0)
			die("failed to write object");
		added_object(nr, type, buf, size);
		free(buf);
		obj_list[nr].obj = NULL;
	} else if (type == OBJ_BLOB) {
		struct blob *blob;
		if (write_object_file(buf, size, type,
				      &obj_list[nr].oid) < 0)
			die("failed to write object");
		added_object(nr, type, buf, size);
		free(buf);

		blob = lookup_blob(the_repository, &obj_list[nr].oid);
		if (blob)
			blob->object.flags |= FLAG_WRITTEN;
		else
			die("invalid blob object");
		obj_list[nr].obj = NULL;
	} else {
		struct object *obj;
		int eaten;
		hash_object_file(the_hash_algo, buf, size, type,
				 &obj_list[nr].oid);
		added_object(nr, type, buf, size);
		obj = parse_object_buffer(the_repository, &obj_list[nr].oid,
					  type, size, buf,
					  &eaten);
		if (!obj)
			die("invalid %s", type_name(type));
		add_object_buffer(obj, buf, size);
		obj->flags |= FLAG_OPEN;
		obj_list[nr].obj = obj;
	}
}
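
/*
 * Apply a delta on top of its fully resolved base and write the
 * result out as the nr-th object.
 */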
static void resolve_delta(unsigned nr, enum object_type type,
			  void *base, unsigned long base_size,
			  void *delta, unsigned long delta_size)
{
	void *result;
	unsigned long result_size;

	result = patch_delta(base, base_size,
			     delta, delta_size,
			     &result_size);
	if (!result)
		die("failed to apply delta");
	free(delta);
	write_object(nr, type, result, result_size);
}

/*
 * We now know the contents of an object (the nr-th in the pack);
 * resolve all the deltified objects that are based on it.
 */
static void added_object(unsigned nr, enum object_type type,
			 void *data, unsigned long size)
{
	struct delta_info **p = &delta_list;
	struct delta_info *info;

	while ((info = *p) != NULL) {
		if (oideq(&info->base_oid, &obj_list[nr].oid) ||
		    info->base_offset == obj_list[nr].offset) {
			*p = info->next;
			p = &delta_list;
			resolve_delta(info->nr, type, data, size,
				      info->delta, info->size);
			free(info);
			continue;
		}
		p = &info->next;
	}
}

static void unpack_non_delta_entry(enum object_type type, unsigned long size,
				   unsigned nr)
{
	void *buf = get_data(size);

	if (buf)
		write_object(nr, type, buf, size);
}
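
/*
 * State for feeding inflated pack data to stream_loose_object() in
 * fixed-size chunks, so a large blob never has to sit fully in memory.
 */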
struct input_zstream_data {
	git_zstream *zstream;
	unsigned char buf[8192];
	int status;
};

static const void *feed_input_zstream(struct input_stream *in_stream,
				      unsigned long *readlen)
{
	struct input_zstream_data *data = in_stream->data;
	git_zstream *zstream = data->zstream;
	void *in = fill(1);

	if (in_stream->is_finished) {
		*readlen = 0;
		return NULL;
	}

	zstream->next_out = data->buf;
	zstream->avail_out = sizeof(data->buf);
	zstream->next_in = in;
	zstream->avail_in = len;

	data->status = git_inflate(zstream, 0);

	in_stream->is_finished = data->status != Z_OK;
	use(len - zstream->avail_in);
	*readlen = sizeof(data->buf) - zstream->avail_out;

	return data->buf;
}
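
/*
 * Unpack a blob by streaming it straight into a loose object instead
 * of inflating the whole thing into one big buffer first.
 */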
static void stream_blob(unsigned long size, unsigned nr)
{
	git_zstream zstream = { 0 };
	struct input_zstream_data data = { 0 };
	struct input_stream in_stream = {
		.read = feed_input_zstream,
		.data = &data,
	};
	struct obj_info *info = &obj_list[nr];

	data.zstream = &zstream;
	git_inflate_init(&zstream);

	if (stream_loose_object(&in_stream, size, &info->oid))
		die(_("failed to write object in stream"));

	if (data.status != Z_STREAM_END)
		die(_("inflate returned (%d)"), data.status);
	git_inflate_end(&zstream);

	if (strict) {
		struct blob *blob = lookup_blob(the_repository, &info->oid);

		if (!blob)
			die(_("invalid blob object from stream"));
		blob->object.flags |= FLAG_WRITTEN;
	}
	info->obj = NULL;
}
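
/*
 * If the delta base is one of the objects held in core, resolve the
 * delta against it; returns 1 if the delta was handled that way.
 */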
static int resolve_against_held(unsigned nr, const struct object_id *base,
				void *delta_data, unsigned long delta_size)
{
	struct object *obj;
	struct obj_buffer *obj_buffer;
	obj = lookup_object(the_repository, base);
	if (!obj)
		return 0;
	obj_buffer = lookup_object_buffer(obj);
	if (!obj_buffer)
		return 0;
	resolve_delta(nr, obj->type, obj_buffer->buffer,
		      obj_buffer->size, delta_data, delta_size);
	return 1;
}
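
/*
 * Read a REF_DELTA or OFS_DELTA entry and find its base: already on
 * disk, held in core, or not seen yet, in which case the delta is
 * queued until the base arrives.
 */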
static void unpack_delta_entry(enum object_type type, unsigned long delta_size,
			       unsigned nr)
{
	void *delta_data, *base;
	unsigned long base_size;
	struct object_id base_oid;

	if (type == OBJ_REF_DELTA) {
		oidread(&base_oid, fill(the_hash_algo->rawsz));
		use(the_hash_algo->rawsz);
		delta_data = get_data(delta_size);
		if (!delta_data)
			return;
		if (repo_has_object_file(the_repository, &base_oid))
			; /* Ok we have this one */
		else if (resolve_against_held(nr, &base_oid,
					      delta_data, delta_size))
			return; /* we are done */
		else {
			/* cannot resolve yet --- queue it */
			oidclr(&obj_list[nr].oid);
			add_delta_to_list(nr, &base_oid, 0, delta_data, delta_size);
			return;
		}
	} else {
		unsigned base_found = 0;
		unsigned char *pack, c;
		off_t base_offset;
		unsigned lo, mid, hi;

		pack = fill(1);
		c = *pack;
		use(1);
		base_offset = c & 127;
		while (c & 128) {
			base_offset += 1;
			if (!base_offset || MSB(base_offset, 7))
				die("offset value overflow for delta base object");
			pack = fill(1);
			c = *pack;
			use(1);
			base_offset = (base_offset << 7) + (c & 127);
		}
		base_offset = obj_list[nr].offset - base_offset;
		if (base_offset <= 0 || base_offset >= obj_list[nr].offset)
			die("offset value out of bound for delta base object");

		delta_data = get_data(delta_size);
		if (!delta_data)
			return;
		lo = 0;
		hi = nr;
		while (lo < hi) {
			mid = lo + (hi - lo) / 2;
			if (base_offset < obj_list[mid].offset) {
				hi = mid;
			} else if (base_offset > obj_list[mid].offset) {
				lo = mid + 1;
			} else {
				oidcpy(&base_oid, &obj_list[mid].oid);
				base_found = !is_null_oid(&base_oid);
				break;
			}
		}
		if (!base_found) {
			/*
			 * The delta base object is itself a delta that
			 * has not been resolved yet.
			 */
			oidclr(&obj_list[nr].oid);
			add_delta_to_list(nr, null_oid(), base_offset,
					  delta_data, delta_size);
			return;
		}
	}

	if (resolve_against_held(nr, &base_oid, delta_data, delta_size))
		return;

	base = repo_read_object_file(the_repository, &base_oid, &type,
				     &base_size);
	if (!base) {
		error("failed to read delta-pack base object %s",
		      oid_to_hex(&base_oid));
		if (!recover)
			exit(1);
		has_errors = 1;
		return;
	}
	resolve_delta(nr, type, base, base_size, delta_data, delta_size);
	free(base);
}
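
/*
 * Parse the type and size header of the nr-th pack entry and dispatch
 * to the matching unpacker.
 */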
static void unpack_one(unsigned nr)
{
	unsigned shift;
	unsigned char *pack;
	unsigned long size, c;
	enum object_type type;

	obj_list[nr].offset = consumed_bytes;

	pack = fill(1);
	c = *pack;
	use(1);
	type = (c >> 4) & 7;
	size = (c & 15);
	shift = 4;
	while (c & 0x80) {
		pack = fill(1);
		c = *pack;
		use(1);
		size += (c & 0x7f) << shift;
		shift += 7;
	}

	switch (type) {
	case OBJ_BLOB:
		if (!dry_run && size > big_file_threshold) {
			stream_blob(size, nr);
			return;
		}
		/* fallthrough */
	case OBJ_COMMIT:
	case OBJ_TREE:
	case OBJ_TAG:
		unpack_non_delta_entry(type, size, nr);
		return;
	case OBJ_REF_DELTA:
	case OBJ_OFS_DELTA:
		unpack_delta_entry(type, size, nr);
		return;
	default:
		error("bad object type %d", type);
		has_errors = 1;
		if (recover)
			return;
		exit(1);
	}
}
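
/*
 * Validate the pack header, then unpack every entry within a single
 * ODB transaction, showing progress unless asked to be quiet.
 */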
static void unpack_all(void)
{
	int i;
	struct pack_header *hdr = fill(sizeof(struct pack_header));

	nr_objects = ntohl(hdr->hdr_entries);

	if (ntohl(hdr->hdr_signature) != PACK_SIGNATURE)
		die("bad pack file");
	if (!pack_version_ok(hdr->hdr_version))
		die("unknown pack file version %"PRIu32,
		    ntohl(hdr->hdr_version));
	use(sizeof(struct pack_header));

	if (!quiet)
		progress = start_progress(_("Unpacking objects"), nr_objects);
	CALLOC_ARRAY(obj_list, nr_objects);
	begin_odb_transaction();
	for (i = 0; i < nr_objects; i++) {
		unpack_one(i);
		display_progress(progress, i + 1);
	}
	end_odb_transaction();
	stop_progress(&progress);

	if (delta_list)
		die("unresolved deltas left after unpacking");
}
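
/*
 * Entry point: parse the command-line flags, unpack the pack coming in
 * on the standard input, and verify the trailing pack checksum.
 */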
int cmd_unpack_objects(int argc, const char **argv, const char *prefix UNUSED)
{
	int i;
	struct object_id oid;

	disable_replace_refs();

	git_config(git_default_config, NULL);

	quiet = !isatty(2);

	for (i = 1; i < argc; i++) {
		const char *arg = argv[i];

		if (*arg == '-') {
			if (!strcmp(arg, "-n")) {
				dry_run = 1;
				continue;
			}
			if (!strcmp(arg, "-q")) {
				quiet = 1;
				continue;
			}
			if (!strcmp(arg, "-r")) {
				recover = 1;
				continue;
			}
			if (!strcmp(arg, "--strict")) {
				strict = 1;
				continue;
			}
			if (skip_prefix(arg, "--strict=", &arg)) {
				strict = 1;
				fsck_set_msg_types(&fsck_options, arg);
				continue;
			}
			if (starts_with(arg, "--pack_header=")) {
				struct pack_header *hdr;
				char *c;

				hdr = (struct pack_header *)buffer;
				hdr->hdr_signature = htonl(PACK_SIGNATURE);
				hdr->hdr_version = htonl(strtoul(arg + 14, &c, 10));
				if (*c != ',')
					die("bad %s", arg);
				hdr->hdr_entries = htonl(strtoul(c + 1, &c, 10));
				if (*c)
					die("bad %s", arg);
				len = sizeof(*hdr);
				continue;
			}
			if (skip_prefix(arg, "--max-input-size=", &arg)) {
				max_input_size = strtoumax(arg, NULL, 10);
				continue;
			}
			usage(unpack_usage);
		}

		/* We don't take any non-flag arguments now... Maybe some day */
		usage(unpack_usage);
	}
	the_hash_algo->init_fn(&ctx);
	unpack_all();
	the_hash_algo->update_fn(&ctx, buffer, offset);
	the_hash_algo->final_oid_fn(&oid, &ctx);
	if (strict) {
		write_rest();
		if (fsck_finish(&fsck_options))
			die(_("fsck error in pack objects"));
	}
	if (!hasheq(fill(the_hash_algo->rawsz), oid.hash))
		die("final sha1 did not match");
	use(the_hash_algo->rawsz);

	/* Write the last part of the buffer to stdout */
	while (len) {
		int ret = xwrite(1, buffer + offset, len);
		if (ret <= 0)
			break;
		len -= ret;
		offset += ret;
	}

	/* All done */
	return has_errors;
}