#include "cache.h"
#include "object.h"
#include "blob.h"
#include "tree.h"
#include "commit.h"
#include "tag.h"
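
/*
 * Every object created in this process is kept in a single
 * open-addressed hash table, indexed by the leading bytes of its
 * object name (sha1).
 */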
static struct object **obj_hash;
static int nr_objs, obj_hash_size;
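
/*
 * The table can be scanned slot by slot with get_max_object_index()
 * and get_indexed_object(); empty slots are returned as NULL, so
 * callers must be prepared to skip them.
 */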
unsigned int get_max_object_index(void)
{
	return obj_hash_size;
}

struct object *get_indexed_object(unsigned int idx)
{
	return obj_hash[idx];
}

static const char *object_type_strings[] = {
	NULL,		/* OBJ_NONE = 0 */
	"commit",	/* OBJ_COMMIT = 1 */
	"tree",		/* OBJ_TREE = 2 */
	"blob",		/* OBJ_BLOB = 3 */
	"tag",		/* OBJ_TAG = 4 */
};
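
/*
 * Translate between the enum object_type constants and the type names
 * used in the on-disk object format ("commit", "tree", "blob", "tag").
 */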
const char *typename(unsigned int type)
{
	if (type >= ARRAY_SIZE(object_type_strings))
		return NULL;
	return object_type_strings[type];
}

int type_from_string(const char *str)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(object_type_strings); i++)
		if (!strcmp(str, object_type_strings[i]))
			return i;
	die("invalid object type \"%s\"", str);
}
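
/*
 * An object name is itself the output of a cryptographic hash, so its
 * leading bytes are already well distributed; they are used directly
 * as the hash table index.
 */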
static unsigned int hash_obj(struct object *obj, unsigned int n)
{
	unsigned int hash;
	memcpy(&hash, obj->sha1, sizeof(unsigned int));
	return hash % n;
}
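
/*
 * Insert obj into the given table using linear probing: start at the
 * object's natural slot and walk forward (wrapping around) until an
 * empty slot is found.  The caller ensures the table is never full.
 */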
static void insert_obj_hash(struct object *obj, struct object **hash, unsigned int size)
{
	unsigned int j = hash_obj(obj, size);

	while (hash[j]) {
		j++;
		if (j >= size)
			j = 0;
	}
	hash[j] = obj;
}

static unsigned int hashtable_index(const unsigned char *sha1)
{
	unsigned int i;
	memcpy(&i, sha1, sizeof(unsigned int));
	return i % obj_hash_size;
}
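
/*
 * Look up an object by name.  Collisions are resolved by probing
 * forward from the natural slot; when the object is found further
 * down the chain it is swapped into the slot where the search began,
 * so that repeated lookups of the same object hit on the first probe.
 * The swap is safe because every slot between the starting point and
 * the found entry is occupied, so displaced entries stay reachable.
 */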
struct object *lookup_object(const unsigned char *sha1)
{
	unsigned int i, first;
	struct object *obj;

	if (!obj_hash)
		return NULL;

	first = i = hashtable_index(sha1);
	while ((obj = obj_hash[i]) != NULL) {
		if (!hashcmp(sha1, obj->sha1))
			break;
		i++;
		if (i == obj_hash_size)
			i = 0;
	}
	if (obj && i != first) {
		/*
		 * Move object to where we started to look for it so
		 * that we do not need to walk the hash table the next
		 * time we look for it.
		 */
		struct object *tmp = obj_hash[i];
		obj_hash[i] = obj_hash[first];
		obj_hash[first] = tmp;
	}
	return obj;
}
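
/*
 * Double the table (starting at 32 slots) and re-insert every
 * existing object into the new, larger table.
 */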
static void grow_object_hash(void)
{
	int i;
	int new_hash_size = obj_hash_size < 32 ? 32 : 2 * obj_hash_size;
	struct object **new_hash;

	new_hash = xcalloc(new_hash_size, sizeof(struct object *));
	for (i = 0; i < obj_hash_size; i++) {
		struct object *obj = obj_hash[i];
		if (!obj)
			continue;
		insert_obj_hash(obj, new_hash, new_hash_size);
	}
	free(obj_hash);
	obj_hash = new_hash;
	obj_hash_size = new_hash_size;
}
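
/*
 * Initialize the caller-allocated object, register it in the hash
 * table and return it.  The table is grown whenever it is about half
 * full, keeping probe chains short.
 */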
void *create_object(const unsigned char *sha1, int type, void *o)
{
	struct object *obj = o;

	obj->parsed = 0;
	obj->used = 0;
	obj->type = type;
	obj->flags = 0;
	hashcpy(obj->sha1, sha1);

	if (obj_hash_size - 1 <= nr_objs * 2)
		grow_object_hash();

	insert_obj_hash(obj, obj_hash, obj_hash_size);
	nr_objs++;
	return obj;
}
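
/*
 * Return the object with the given name, creating a typeless
 * placeholder (OBJ_NONE) if it has not been seen yet; the real type
 * is filled in later, when the object is actually parsed.
 */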
struct object *lookup_unknown_object(const unsigned char *sha1)
{
	struct object *obj = lookup_object(sha1);
	if (!obj)
		obj = create_object(sha1, OBJ_NONE, alloc_object_node());
	return obj;
}
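
/*
 * Parse the given buffer as an object of the given type, creating or
 * reusing the matching in-core object.  *eaten_p is set to 1 when the
 * object keeps a reference to the buffer, in which case the caller
 * must not free it.
 */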
struct object *parse_object_buffer(const unsigned char *sha1, enum object_type type, unsigned long size, void *buffer, int *eaten_p)
{
	struct object *obj;
	*eaten_p = 0;

	obj = NULL;
	if (type == OBJ_BLOB) {
		struct blob *blob = lookup_blob(sha1);
		if (blob) {
			if (parse_blob_buffer(blob, buffer, size))
				return NULL;
			obj = &blob->object;
		}
	} else if (type == OBJ_TREE) {
		struct tree *tree = lookup_tree(sha1);
		if (tree) {
			obj = &tree->object;
			if (!tree->buffer)
				tree->object.parsed = 0;
			if (!tree->object.parsed) {
				if (parse_tree_buffer(tree, buffer, size))
					return NULL;
				*eaten_p = 1;
			}
		}
	} else if (type == OBJ_COMMIT) {
		struct commit *commit = lookup_commit(sha1);
		if (commit) {
			if (parse_commit_buffer(commit, buffer, size))
				return NULL;
			if (!commit->buffer) {
				commit->buffer = buffer;
				*eaten_p = 1;
			}
			obj = &commit->object;
		}
	} else if (type == OBJ_TAG) {
		struct tag *tag = lookup_tag(sha1);
		if (tag) {
			if (parse_tag_buffer(tag, buffer, size))
				return NULL;
			obj = &tag->object;
		}
	} else {
		warning("object %s has unknown type id %d", sha1_to_hex(sha1), type);
		obj = NULL;
	}
	if (obj && obj->type == OBJ_NONE)
		obj->type = type;
	return obj;
}
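
/*
 * Like parse_object(), but die() instead of returning NULL, reporting
 * the given name (or the sha1 if name is NULL).
 */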
struct object *parse_object_or_die(const unsigned char *sha1,
				   const char *name)
{
	struct object *o = parse_object(sha1);
	if (o)
		return o;

	die(_("unable to parse object: %s"), name ? name : sha1_to_hex(sha1));
}
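
/*
 * Read and parse the object named by sha1.  The in-core object cache
 * is consulted first, so an object that has already been parsed in
 * this process is returned without touching the object database.
 * Blobs are handled specially: their existence and checksum are
 * verified, but their contents are not kept in memory.
 */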
struct object *parse_object(const unsigned char *sha1)
{
	unsigned long size;
	enum object_type type;
	int eaten;
	const unsigned char *repl = lookup_replace_object(sha1);
	void *buffer;
	struct object *obj;

	obj = lookup_object(sha1);
	if (obj && obj->parsed)
		return obj;

	if ((obj && obj->type == OBJ_BLOB) ||
	    (!obj && has_sha1_file(sha1) &&
	     sha1_object_info(sha1, NULL) == OBJ_BLOB)) {
		if (check_sha1_signature(repl, NULL, 0, NULL) < 0) {
			error("sha1 mismatch %s", sha1_to_hex(repl));
			return NULL;
		}
		parse_blob_buffer(lookup_blob(sha1), NULL, 0);
		return lookup_object(sha1);
	}

	buffer = read_sha1_file(sha1, &type, &size);
	if (buffer) {
		if (check_sha1_signature(repl, buffer, size, typename(type)) < 0) {
			free(buffer);
			error("sha1 mismatch %s", sha1_to_hex(repl));
			return NULL;
		}

		obj = parse_object_buffer(sha1, type, size, buffer, &eaten);
		if (!eaten)
			free(buffer);
		return obj;
	}
	return NULL;
}
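
/*
 * object_list is a simple singly linked list; object_array (below) is
 * the denser, growable alternative used when entries need names.
 */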
struct object_list *object_list_insert(struct object *item,
				       struct object_list **list_p)
{
	struct object_list *new_list = xmalloc(sizeof(struct object_list));
	new_list->item = item;
	new_list->next = *list_p;
	*list_p = new_list;
	return new_list;
}

int object_list_contains(struct object_list *list, struct object *obj)
{
	while (list) {
		if (list->item == obj)
			return 1;
		list = list->next;
	}
	return 0;
}

void add_object_array(struct object *obj, const char *name, struct object_array *array)
{
	add_object_array_with_mode(obj, name, array, S_IFINVALID);
}

/*
 * A zero-length string to which object_array_entry::name can be
 * initialized without requiring a malloc/free.
 */
static char object_array_slopbuf[1];
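
/*
 * The array owns the memory for entry names: a non-empty name is
 * copied with xstrdup(), an empty name points at the static slopbuf
 * above, and a NULL name stays NULL.  Code that drops entries frees
 * the copied names accordingly.
 */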
void add_object_array_with_mode(struct object *obj, const char *name, struct object_array *array, unsigned mode)
{
	unsigned nr = array->nr;
	unsigned alloc = array->alloc;
	struct object_array_entry *objects = array->objects;
	struct object_array_entry *entry;

	if (nr >= alloc) {
		alloc = (alloc + 32) * 2;
		objects = xrealloc(objects, alloc * sizeof(*objects));
		array->alloc = alloc;
		array->objects = objects;
	}
	entry = &objects[nr];
	entry->item = obj;
	if (!name)
		entry->name = NULL;
	else if (!*name)
		/* Use our own empty string instead of allocating one: */
		entry->name = object_array_slopbuf;
	else
		entry->name = xstrdup(name);
	entry->mode = mode;
	array->nr = ++nr;
}
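
/*
 * Keep only the entries for which want() returns true, compacting the
 * array in place and freeing the names of discarded entries.
 */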
void object_array_filter(struct object_array *array,
			 object_array_each_func_t want, void *cb_data)
{
	unsigned nr = array->nr, src, dst;
	struct object_array_entry *objects = array->objects;

	for (src = dst = 0; src < nr; src++) {
		if (want(&objects[src], cb_data)) {
			if (src != dst)
				objects[dst] = objects[src];
			dst++;
		} else {
			if (objects[src].name != object_array_slopbuf)
				free(objects[src].name);
		}
	}
	array->nr = dst;
}

/*
 * Return true iff array already contains an entry with name.
 */
static int contains_name(struct object_array *array, const char *name)
{
	unsigned nr = array->nr, i;
	struct object_array_entry *object = array->objects;

	for (i = 0; i < nr; i++, object++)
		if (!strcmp(object->name, name))
			return 1;
	return 0;
}
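
/*
 * Drop entries whose name duplicates that of an earlier entry,
 * keeping the first occurrence and freeing the names of the rest.
 */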
void object_array_remove_duplicates(struct object_array *array)
{
	unsigned nr = array->nr, src;
	struct object_array_entry *objects = array->objects;

	array->nr = 0;
	for (src = 0; src < nr; src++) {
		if (!contains_name(array, objects[src].name)) {
			if (src != array->nr)
				objects[array->nr] = objects[src];
			array->nr++;
		} else {
			if (objects[src].name != object_array_slopbuf)
				free(objects[src].name);
		}
	}
}
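
/*
 * Clear the given flag bits on every object currently in the hash
 * table.
 */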
void clear_object_flags(unsigned flags)
{
	int i;

	for (i = 0; i < obj_hash_size; i++) {
		struct object *obj = obj_hash[i];
		if (obj)
			obj->flags &= ~flags;
	}
}