#include "cache.h"
#include "refs.h"
#include "pkt-line.h"
#include "sideband.h"
#include "tag.h"
#include "object.h"
#include "commit.h"
#include "exec_cmd.h"
#include "diff.h"
#include "revision.h"
#include "list-objects.h"
#include "run-command.h"
#include "sigchain.h"
#include "version.h"

static const char upload_pack_usage[] = "git upload-pack [--strict] [--timeout=<n>] <dir>";

/* bits #0..7 in revision.h, #8..10 in commit.c */
#define THEY_HAVE	(1u << 11)
#define OUR_REF		(1u << 12)
#define WANTED		(1u << 13)
#define COMMON_KNOWN	(1u << 14)
#define REACHABLE	(1u << 15)

#define SHALLOW		(1u << 16)
#define NOT_SHALLOW	(1u << 17)
#define CLIENT_SHALLOW	(1u << 18)

static unsigned long oldest_have;

static int multi_ack, nr_our_refs;
static int no_done;
static int use_thin_pack, use_ofs_delta, use_include_tag;
static int no_progress, daemon_mode;
static int shallow_nr;
static struct object_array have_obj;
static struct object_array want_obj;
static struct object_array extra_edge_obj;
static unsigned int timeout;
/* 0 for no sideband,
 * otherwise maximum packet size (up to 65520 bytes).
 */
static int use_sideband;
static int debug_fd;
static int advertise_refs;
static int stateless_rpc;

static void reset_timeout(void)
{
	alarm(timeout);
}

static int strip(char *line, int len)
{
	if (len && line[len-1] == '\n')
		line[--len] = 0;
	return len;
}
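
/*
 * Write data back to the client.  With a sideband in effect everything
 * is multiplexed onto fd 1 by send_sideband(): fd 1 becomes band #1
 * (pack data), fd 2 band #2 (progress), and fd 3 band #3 (the fatal
 * error message sent just before dying).
 */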
static ssize_t send_client_data(int fd, const char *data, ssize_t sz)
{
	if (use_sideband)
		return send_sideband(1, fd, data, sz, use_sideband);
	if (fd == 3)
		/* emergency quit */
		fd = 2;
	if (fd == 2) {
		/* XXX: are we happy to lose stuff here? */
		xwrite(fd, data, sz);
		return sz;
	}
	return safe_write(fd, data, sz);
}

static FILE *pack_pipe = NULL;
static void show_commit(struct commit *commit, void *data)
{
	if (commit->object.flags & BOUNDARY)
		fputc('-', pack_pipe);
	if (fputs(sha1_to_hex(commit->object.sha1), pack_pipe) < 0)
		die("broken output pipe");
	fputc('\n', pack_pipe);
	fflush(pack_pipe);
	free(commit->buffer);
	commit->buffer = NULL;
}

static void show_object(struct object *obj,
			const struct name_path *path, const char *component,
			void *cb_data)
{
	show_object_with_name(pack_pipe, obj, path, component);
}

static void show_edge(struct commit *commit)
{
	fprintf(pack_pipe, "-%s\n", sha1_to_hex(commit->object.sha1));
}
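
/*
 * Async callback: run an in-process revision walk over the want/have
 * sets and stream the resulting object list to "out", which is wired
 * to the stdin of pack-objects.  Used for shallow fetches; the
 * non-shallow case feeds the tips straight to "pack-objects --revs"
 * instead (see create_pack_file below).
 */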
static int do_rev_list(int in, int out, void *user_data)
{
	int i;
	struct rev_info revs;

	pack_pipe = xfdopen(out, "w");
	init_revisions(&revs, NULL);
	revs.tag_objects = 1;
	revs.tree_objects = 1;
	revs.blob_objects = 1;
	if (use_thin_pack)
		revs.edge_hint = 1;

	for (i = 0; i < want_obj.nr; i++) {
		struct object *o = want_obj.objects[i].item;
		/* why??? */
		o->flags &= ~UNINTERESTING;
		add_pending_object(&revs, o, NULL);
	}
	for (i = 0; i < have_obj.nr; i++) {
		struct object *o = have_obj.objects[i].item;
		o->flags |= UNINTERESTING;
		add_pending_object(&revs, o, NULL);
	}
	setup_revisions(0, NULL, &revs, NULL);
	if (prepare_revision_walk(&revs))
		die("revision walk setup failed");
	mark_edges_uninteresting(revs.commits, &revs, show_edge);
	if (use_thin_pack)
		for (i = 0; i < extra_edge_obj.nr; i++)
			fprintf(pack_pipe, "-%s\n", sha1_to_hex(
					extra_edge_obj.objects[i].item->sha1));
	traverse_commit_list(&revs, show_commit, show_object, NULL);
	fflush(pack_pipe);
	fclose(pack_pipe);
	return 0;
}
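
/*
 * Note the start order below: pack-objects (the reader) is spawned
 * before the async rev-list writer.  The reverse order can deadlock,
 * because start_command() calls fflush(NULL), which needs the stdio
 * lock on pack_pipe that the rev-list thread may be holding while it
 * is blocked writing to a pipe nobody reads from yet.
 */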
static void create_pack_file(void)
{
	struct async rev_list;
	struct child_process pack_objects;
	int create_full_pack = (nr_our_refs == want_obj.nr && !have_obj.nr);
	char data[8193], progress[128];
	char abort_msg[] = "aborting due to possible repository "
		"corruption on the remote side.";
	int buffered = -1;
	ssize_t sz;
	const char *argv[10];
	int arg = 0;

argv[arg++] = "pack-objects";
|
|
|
|
if (!shallow_nr) {
|
2009-06-09 23:50:18 +00:00
|
|
|
argv[arg++] = "--revs";
|
|
|
|
if (create_full_pack)
|
|
|
|
argv[arg++] = "--all";
|
|
|
|
else if (use_thin_pack)
|
|
|
|
argv[arg++] = "--thin";
|
|
|
|
}
|
2005-07-04 23:35:13 +00:00
|
|
|
|
2007-10-19 19:47:59 +00:00
|
|
|
argv[arg++] = "--stdout";
|
|
|
|
if (!no_progress)
|
|
|
|
argv[arg++] = "--progress";
|
|
|
|
if (use_ofs_delta)
|
|
|
|
argv[arg++] = "--delta-base-offset";
|
2008-03-04 03:27:33 +00:00
|
|
|
if (use_include_tag)
|
|
|
|
argv[arg++] = "--include-tag";
|
2007-10-19 19:47:59 +00:00
|
|
|
argv[arg++] = NULL;
|
|
|
|
|
|
|
|
memset(&pack_objects, 0, sizeof(pack_objects));
|
upload-pack: start pack-objects before async rev-list
In a pthread-enabled version of upload-pack, there's a race condition
that can cause a deadlock on the fflush(NULL) we call from run-command.
What happens is this:
1. Upload-pack is informed we are doing a shallow clone.
2. We call start_async() to spawn a thread that will generate rev-list
results to feed to pack-objects. It gets a file descriptor to a
pipe which will eventually hook to pack-objects.
3. The rev-list thread uses fdopen to create a new output stream
around the fd we gave it, called pack_pipe.
4. The thread writes results to pack_pipe. Outside of our control,
libc is doing locking on the stream. We keep writing until the OS
pipe buffer is full, and then we block in write(), still holding
the lock.
5. The main thread now uses start_command to spawn pack-objects.
Before forking, it calls fflush(NULL) to flush every stdio output
buffer. It blocks trying to get the lock on pack_pipe.
And we have a deadlock. The thread will block until somebody starts
reading from the pipe. But nobody will read from the pipe until we
finish flushing to the pipe.
To fix this, we swap the start order: we start the
pack-objects reader first, and then the rev-list writer
after. Thus the problematic fflush(NULL) happens before we
even open the new file descriptor (and even if it didn't,
flushing should no longer block, as the reader at the end of
the pipe is now active).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2011-04-06 21:33:33 +00:00
|
|
|
pack_objects.in = -1;
|
2007-10-19 19:47:59 +00:00
|
|
|
pack_objects.out = -1;
|
|
|
|
pack_objects.err = -1;
|
|
|
|
pack_objects.git_cmd = 1;
|
|
|
|
pack_objects.argv = argv;
|
2007-10-19 19:48:03 +00:00
|
|
|
|
upload-pack: Use finish_{command,async}() instead of waitpid().
upload-pack spawns two processes, rev-list and pack-objects, and carefully
monitors their status so that it can report failure to the remote end.
This change removes the complicated procedures on the grounds of the
following observations:
- If everything is OK, rev-list closes its output pipe end, upon which
pack-objects (which reads from the pipe) sees EOF and terminates itself,
closing its output (and error) pipes. upload-pack reads from both until
it sees EOF in both. It collects the exit codes of the child processes
(which indicate success) and terminates successfully.
- If rev-list sees an error, it closes its output and terminates with
failure. pack-objects sees EOF in its input and terminates successfully.
Again upload-pack reads its inputs until EOF. When it now collects
the exit codes of its child processes, it notices the failure of rev-list
and signals failure to the remote end.
- If pack-objects sees an error, it terminates with failure. Since this
breaks the pipe to rev-list, rev-list is killed with SIGPIPE.
upload-pack reads its input until EOF, then collects the exit codes of
the child processes, notices their failures, and signals failure to the
remote end.
- If upload-pack itself dies unexpectedly, pack-objects is killed with
SIGPIPE, and subsequently also rev-list.
The upshot of this is that precise monitoring of child processes is not
required because both terminate if either one of them dies unexpectedly.
This allows us to use finish_command() and finish_async() instead of
an explicit waitpid(2) call.
The change is smaller than it looks because most of it only reduces the
indentation of a large part of the inner loop.
Signed-off-by: Johannes Sixt <johannes.sixt@telecom.at>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2007-11-04 19:46:48 +00:00
|
|
|
	if (start_command(&pack_objects))
		die("git upload-pack: unable to fork git-pack-objects");
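
	/*
	 * For a shallow fetch the object list comes from our own
	 * revision walk (do_rev_list, run asynchronously); otherwise we
	 * simply write the want/have tips to "pack-objects --revs".
	 */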
	if (shallow_nr) {
		memset(&rev_list, 0, sizeof(rev_list));
		rev_list.proc = do_rev_list;
		rev_list.out = pack_objects.in;
		if (start_async(&rev_list))
			die("git upload-pack: unable to fork git-rev-list");
	}
	else {
		FILE *pipe_fd = xfdopen(pack_objects.in, "w");
		if (!create_full_pack) {
			int i;
			for (i = 0; i < want_obj.nr; i++)
				fprintf(pipe_fd, "%s\n", sha1_to_hex(want_obj.objects[i].item->sha1));
			fprintf(pipe_fd, "--not\n");
			for (i = 0; i < have_obj.nr; i++)
				fprintf(pipe_fd, "%s\n", sha1_to_hex(have_obj.objects[i].item->sha1));
		}

		fprintf(pipe_fd, "\n");
		fflush(pipe_fd);
		fclose(pipe_fd);
	}

	/* We read from pack_objects.err to capture stderr output for
	 * progress bar, and pack_objects.out to capture the pack data.
	 */

	while (1) {
		struct pollfd pfd[2];
		int pe, pu, pollsize;

		reset_timeout();

		pollsize = 0;
		pe = pu = -1;

		if (0 <= pack_objects.out) {
			pfd[pollsize].fd = pack_objects.out;
			pfd[pollsize].events = POLLIN;
			pu = pollsize;
			pollsize++;
		}
		if (0 <= pack_objects.err) {
			pfd[pollsize].fd = pack_objects.err;
			pfd[pollsize].events = POLLIN;
			pe = pollsize;
			pollsize++;
		}

		if (!pollsize)
			break;

		if (poll(pfd, pollsize, -1) < 0) {
			if (errno != EINTR) {
				error("poll failed, resuming: %s",
				      strerror(errno));
				sleep(1);
			}
			continue;
		}
		if (0 <= pe && (pfd[pe].revents & (POLLIN|POLLHUP))) {
			/* Status ready; we ship that in the side-band
			 * or dump to the standard error.
			 */
			sz = xread(pack_objects.err, progress,
				   sizeof(progress));
			if (0 < sz)
				send_client_data(2, progress, sz);
			else if (sz == 0) {
				close(pack_objects.err);
				pack_objects.err = -1;
			}
			else
				goto fail;
			/* give priority to status messages */
			continue;
		}
		if (0 <= pu && (pfd[pu].revents & (POLLIN|POLLHUP))) {
			/* Data ready; we keep the last byte to ourselves
			 * in case we detect broken rev-list, so that we
			 * can leave the stream corrupted.  This is
			 * unfortunate -- unpack-objects would happily
			 * accept a valid packdata with trailing garbage,
			 * so appending garbage after we pass all the
			 * pack data is not good enough to signal
			 * breakage to downstream.
			 */
			char *cp = data;
			ssize_t outsz = 0;
			if (0 <= buffered) {
				*cp++ = buffered;
				outsz++;
			}
			sz = xread(pack_objects.out, cp,
				   sizeof(data) - outsz);
			if (0 < sz)
				;
			else if (sz == 0) {
				close(pack_objects.out);
				pack_objects.out = -1;
			}
			else
				goto fail;
			sz += outsz;
			if (1 < sz) {
				buffered = data[sz-1] & 0xFF;
				sz--;
			}
			else
				buffered = -1;
			sz = send_client_data(1, data, sz);
			if (sz < 0)
				goto fail;
		}
	}
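
	/*
	 * No elaborate monitoring of the children is needed: if either
	 * pack-objects or the rev-list side dies unexpectedly, the
	 * broken pipe takes the other one down too, so collecting the
	 * exit codes with finish_command()/finish_async() is enough to
	 * report failure to the remote end.
	 */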
	if (finish_command(&pack_objects)) {
		error("git upload-pack: git-pack-objects died with error.");
		goto fail;
	}
	if (shallow_nr && finish_async(&rev_list))
		goto fail;	/* error was already reported */

	/* flush the data */
	if (0 <= buffered) {
		data[0] = buffered;
		sz = send_client_data(1, data, 1);
		if (sz < 0)
			goto fail;
		fprintf(stderr, "flushed.\n");
	}
	if (use_sideband)
		packet_flush(1);
	return;

 fail:
	send_client_data(3, abort_msg, sizeof(abort_msg));
	die("git upload-pack: %s", abort_msg);
}
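
/*
 * Process one "have" object name: return -1 if we do not have the
 * object, 1 if it is a newly discovered common object (recorded in
 * have_obj), and 0 if the client had already told us about it.
 */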
static int got_sha1(char *hex, unsigned char *sha1)
{
	struct object *o;
	int we_knew_they_have = 0;

	if (get_sha1_hex(hex, sha1))
		die("git upload-pack: expected SHA1 object, got '%s'", hex);
	if (!has_sha1_file(sha1))
		return -1;

	o = lookup_object(sha1);
	if (!(o && o->parsed))
		o = parse_object(sha1);
	if (!o)
		die("oops (%s)", sha1_to_hex(sha1));
	if (o->type == OBJ_COMMIT) {
		struct commit_list *parents;
		struct commit *commit = (struct commit *)o;
		if (o->flags & THEY_HAVE)
			we_knew_they_have = 1;
		else
			o->flags |= THEY_HAVE;
		if (!oldest_have || (commit->date < oldest_have))
			oldest_have = commit->date;
		for (parents = commit->parents;
		     parents;
		     parents = parents->next)
			parents->item->object.flags |= THEY_HAVE;
	}
	if (!we_knew_they_have) {
		add_object_array(o, NULL, &have_obj);
		return 1;
	}
	return 0;
}
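
/*
 * Walk the ancestry of "want", newest first, looking for a commit the
 * client claimed to have; the walk stops early at commits older than
 * oldest_have, since nothing beyond that point can be common.  The
 * temporary REACHABLE marks are cleared before returning.
 */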
static int reachable(struct commit *want)
{
	struct commit_list *work = NULL;

	commit_list_insert_by_date(want, &work);
	while (work) {
		struct commit_list *list = work->next;
		struct commit *commit = work->item;
		free(work);
		work = list;

		if (commit->object.flags & THEY_HAVE) {
			want->object.flags |= COMMON_KNOWN;
			break;
		}
		if (!commit->object.parsed)
			parse_object(commit->object.sha1);
		if (commit->object.flags & REACHABLE)
			continue;
		commit->object.flags |= REACHABLE;
		if (commit->date < oldest_have)
			continue;
		for (list = commit->parents; list; list = list->next) {
			struct commit *parent = list->item;
			if (!(parent->object.flags & REACHABLE))
				commit_list_insert_by_date(parent, &work);
		}
	}
	want->object.flags |= REACHABLE;
	clear_commit_marks(want, REACHABLE);
	free_commit_list(work);
	return (want->object.flags & COMMON_KNOWN);
}
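
/*
 * We can tell the client to stop sending "have"s once every wanted
 * object is known to be reachable from something it already has.
 */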
static int ok_to_give_up(void)
{
	int i;

	if (!have_obj.nr)
		return 0;

	for (i = 0; i < want_obj.nr; i++) {
		struct object *want = want_obj.objects[i].item;

		if (want->flags & COMMON_KNOWN)
			continue;
		want = deref_tag(want, "a want line", 0);
		if (!want || want->type != OBJ_COMMIT) {
			/* no way to tell if this is reachable by
			 * looking at the ancestry chain alone, so
			 * leave a note to ourselves not to worry about
			 * this object anymore.
			 */
			want_obj.objects[i].item->flags |= COMMON_KNOWN;
			continue;
		}
		if (!reachable((struct commit *)want))
			return 0;
	}
	return 1;
}
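
/*
 * Negotiation loop: read "have <sha1>" and "done" lines from the
 * client and answer with ACK/NAK according to the negotiated
 * multi_ack mode, until either enough common commits are known or the
 * client says "done".
 */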
static int get_common_commits(void)
{
	static char line[1000];
	unsigned char sha1[20];
	char last_hex[41];
	int got_common = 0;
	int got_other = 0;
	int sent_ready = 0;

	save_commit_buffer = 0;

	for (;;) {
		int len = packet_read_line(0, line, sizeof(line));
		reset_timeout();

		if (!len) {
			if (multi_ack == 2 && got_common
			    && !got_other && ok_to_give_up()) {
				sent_ready = 1;
				packet_write(1, "ACK %s ready\n", last_hex);
			}
			if (have_obj.nr == 0 || multi_ack)
				packet_write(1, "NAK\n");

			if (no_done && sent_ready) {
				packet_write(1, "ACK %s\n", last_hex);
				return 0;
			}
			if (stateless_rpc)
				exit(0);
			got_common = 0;
			got_other = 0;
			continue;
		}
		strip(line, len);
		if (!prefixcmp(line, "have ")) {
			switch (got_sha1(line+5, sha1)) {
			case -1: /* they have what we do not */
				got_other = 1;
				if (multi_ack && ok_to_give_up()) {
					const char *hex = sha1_to_hex(sha1);
					if (multi_ack == 2) {
						sent_ready = 1;
						packet_write(1, "ACK %s ready\n", hex);
					} else
						packet_write(1, "ACK %s continue\n", hex);
				}
				break;
			default:
				got_common = 1;
				memcpy(last_hex, sha1_to_hex(sha1), 41);
				if (multi_ack == 2)
					packet_write(1, "ACK %s common\n", last_hex);
				else if (multi_ack)
					packet_write(1, "ACK %s continue\n", last_hex);
				else if (have_obj.nr == 1)
					packet_write(1, "ACK %s\n", last_hex);
				break;
			}
			continue;
		}
		if (!strcmp(line, "done")) {
			if (have_obj.nr > 0) {
				if (multi_ack)
					packet_write(1, "ACK %s\n", last_hex);
				return 0;
			}
			packet_write(1, "NAK\n");
			return -1;
		}
		die("git upload-pack: expected SHA1 list, got '%s'", line);
	}
}
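
/*
 * In stateless RPC mode the wants arrive in a later request than the
 * ref advertisement, so the client may legitimately ask for an object
 * that is no longer at the tip of any ref.  Accept it only if it is an
 * ancestor of one of the advertised tips, which we verify with an
 * external "rev-list --stdin" run over all OUR_REF objects.
 */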
static void check_non_tip(void)
{
	static const char *argv[] = {
		"rev-list", "--stdin", NULL,
	};
	static struct child_process cmd;
	struct object *o;
	char namebuf[42]; /* ^ + SHA-1 + LF */
	int i;

	/* In the normal in-process case non-tip request can never happen */
	if (!stateless_rpc)
		goto error;

	cmd.argv = argv;
	cmd.git_cmd = 1;
	cmd.no_stderr = 1;
	cmd.in = -1;
	cmd.out = -1;

	if (start_command(&cmd))
		goto error;

	/*
	 * If rev-list --stdin encounters an unknown commit, it
	 * terminates, which will cause SIGPIPE in the write loop
	 * below.
	 */
	sigchain_push(SIGPIPE, SIG_IGN);

	namebuf[0] = '^';
	namebuf[41] = '\n';
	for (i = get_max_object_index(); 0 < i; ) {
		o = get_indexed_object(--i);
		if (!o)
			continue;
		if (!(o->flags & OUR_REF))
			continue;
		memcpy(namebuf + 1, sha1_to_hex(o->sha1), 40);
		if (write_in_full(cmd.in, namebuf, 42) < 0)
			goto error;
	}
	namebuf[40] = '\n';
	for (i = 0; i < want_obj.nr; i++) {
		o = want_obj.objects[i].item;
		if (o->flags & OUR_REF)
			continue;
		memcpy(namebuf, sha1_to_hex(o->sha1), 40);
		if (write_in_full(cmd.in, namebuf, 41) < 0)
			goto error;
	}
	close(cmd.in);

	sigchain_pop(SIGPIPE);

	/*
	 * The commits out of the rev-list are not ancestors of
	 * our ref.
	 */
	i = read_in_full(cmd.out, namebuf, 1);
	if (i)
		goto error;
	close(cmd.out);

	/*
	 * rev-list may have died by encountering a bad commit
	 * in the history, in which case we do want to bail out
	 * even when it showed no commit.
	 */
	if (finish_command(&cmd))
		goto error;

	/* All the non-tip ones are ancestors of what we advertised */
	return;

error:
	/* Pick one of them (we know there at least is one) */
	for (i = 0; i < want_obj.nr; i++) {
		o = want_obj.objects[i].item;
		if (!(o->flags & OUR_REF))
			die("git upload-pack: not our ref %s",
			    sha1_to_hex(o->sha1));
	}
}
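
/*
 * Read the client's "want", "shallow" and "deepen" lines.  The first
 * "want" line also carries the space-separated capability list, which
 * starts right after the 40-hex object name (hence "line + 45").
 */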
|
|
|
|
|
2006-07-06 01:00:02 +00:00
|
|
|
static void receive_needs(void)
|
2005-07-04 22:29:17 +00:00
|
|
|
{
|
2010-08-29 02:04:17 +00:00
|
|
|
struct object_array shallows = OBJECT_ARRAY_INIT;
|
2005-07-04 22:29:17 +00:00
|
|
|
static char line[1000];
|
allow cloning a repository "shallowly"
By specifying a depth, you can now clone a repository such that
all fetched ancestor-chains' length is at most "depth". For example,
if the upstream repository has only 2 branches ("A" and "B"), which
are linear, and you specify depth 3, you will get A, A~1, A~2, A~3,
B, B~1, B~2, and B~3. The ends are automatically made shallow
commits.
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2006-10-30 19:09:29 +00:00
|
|
|
int len, depth = 0;
|
2011-08-05 20:54:06 +00:00
|
|
|
int has_non_tip = 0;
|
2005-07-04 22:29:17 +00:00
|
|
|
|
2009-06-09 23:50:18 +00:00
|
|
|
shallow_nr = 0;
|
2008-03-03 02:35:18 +00:00
|
|
|
if (debug_fd)
|
use write_str_in_full helper to avoid literal string lengths
In 2d14d65 (Use a clearer style to issue commands to remote helpers,
2009-09-03) I happened to notice two changes like this:
- write_in_full(helper->in, "list\n", 5);
+
+ strbuf_addstr(&buf, "list\n");
+ write_in_full(helper->in, buf.buf, buf.len);
+ strbuf_reset(&buf);
IMHO, it would be better to define a new function,
static inline ssize_t write_str_in_full(int fd, const char *str)
{
return write_in_full(fd, str, strlen(str));
}
and then use it like this:
- strbuf_addstr(&buf, "list\n");
- write_in_full(helper->in, buf.buf, buf.len);
- strbuf_reset(&buf);
+ write_str_in_full(helper->in, "list\n");
Thus not requiring the added allocation, and still avoiding
the maintenance risk of literal string lengths.
These days, compilers are good enough that strlen("literal")
imposes no run-time cost.
Transformed via this:
perl -pi -e \
's/write_in_full\((.*?), (".*?"), \d+\)/write_str_in_full($1, $2)/'\
$(git grep -l 'write_in_full.*"')
Signed-off-by: Jim Meyering <meyering@redhat.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2009-09-12 08:54:32 +00:00
|
|
|
write_str_in_full(debug_fd, "#S\n");
|
2005-07-04 22:29:17 +00:00
|
|
|
for (;;) {
|
2005-10-25 01:59:18 +00:00
|
|
|
struct object *o;
|
2012-01-08 21:06:19 +00:00
|
|
|
const char *features;
|
2006-07-06 00:41:39 +00:00
|
|
|
unsigned char sha1_buf[20];
|
2005-07-04 22:29:17 +00:00
|
|
|
len = packet_read_line(0, line, sizeof(line));
|
2005-10-19 21:27:01 +00:00
|
|
|
reset_timeout();
|
2005-07-04 22:29:17 +00:00
|
|
|
if (!len)
|
2006-10-30 19:09:06 +00:00
|
|
|
break;
|
2008-03-03 02:35:18 +00:00
|
|
|
if (debug_fd)
|
|
|
|
write_in_full(debug_fd, line, len);
|
2005-10-05 21:49:54 +00:00
|
|
|
|
2007-02-20 09:54:00 +00:00
|
|
|
if (!prefixcmp(line, "shallow ")) {
|
2006-10-30 19:09:06 +00:00
|
|
|
unsigned char sha1[20];
|
|
|
|
struct object *object;
|
|
|
|
if (get_sha1(line + 8, sha1))
|
|
|
|
die("invalid shallow line: %s", line);
|
|
|
|
object = parse_object(sha1);
|
|
|
|
if (!object)
|
|
|
|
die("did not find object for %s", line);
|
2013-01-08 11:32:36 +00:00
|
|
|
if (object->type != OBJ_COMMIT)
|
|
|
|
die("invalid shallow object %s", sha1_to_hex(sha1));
|
2006-10-30 19:09:53 +00:00
|
|
|
object->flags |= CLIENT_SHALLOW;
|
2006-10-30 19:09:06 +00:00
|
|
|
add_object_array(object, NULL, &shallows);
|
|
|
|
continue;
|
|
|
|
}
|
2007-02-20 09:54:00 +00:00
|
|
|
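		/*
		 * "deepen <n>": the client asks to limit the history it
		 * fetches to at most <n> commits per ancestor chain.
		 */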
if (!prefixcmp(line, "deepen ")) {
|
allow cloning a repository "shallowly"
By specifying a depth, you can now clone a repository such that
all fetched ancestor-chains' length is at most "depth". For example,
if the upstream repository has only 2 branches ("A" and "B"), which
are linear, and you specify depth 3, you will get A, A~1, A~2, A~3,
B, B~1, B~2, and B~3. The ends are automatically made shallow
commits.
Signed-off-by: Johannes Schindelin <johannes.schindelin@gmx.de>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2006-10-30 19:09:29 +00:00
|
|
|
char *end;
|
|
|
|
depth = strtol(line + 7, &end, 0);
|
|
|
|
if (end == line + 7 || depth <= 0)
|
|
|
|
die("Invalid deepen: %s", line);
|
|
|
|
continue;
|
|
|
|
}
|
2007-02-20 09:54:00 +00:00
|
|
|
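		/*
		 * Anything else must be a "want" line naming an object the
		 * client asks us to send.
		 */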
		if (prefixcmp(line, "want ") ||
		    get_sha1_hex(line+5, sha1_buf))
			die("git upload-pack: protocol error, "
			    "expected to get sha, not '%s'", line);

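		/*
		 * The first "want" line also carries the requested
		 * capabilities, e.g.
		 *
		 *   want <40-hex object name> multi_ack_detailed side-band-64k
		 *
		 * so the feature list starts at offset 45
		 * (strlen("want ") + 40).
		 */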
		features = line + 45;

		if (parse_feature_request(features, "multi_ack_detailed"))
			multi_ack = 2;
		else if (parse_feature_request(features, "multi_ack"))
			multi_ack = 1;
		if (parse_feature_request(features, "no-done"))
			no_done = 1;
		if (parse_feature_request(features, "thin-pack"))
			use_thin_pack = 1;
		if (parse_feature_request(features, "ofs-delta"))
			use_ofs_delta = 1;
		if (parse_feature_request(features, "side-band-64k"))
			use_sideband = LARGE_PACKET_MAX;
		else if (parse_feature_request(features, "side-band"))
			use_sideband = DEFAULT_PACKET_MAX;
		if (parse_feature_request(features, "no-progress"))
			no_progress = 1;
		if (parse_feature_request(features, "include-tag"))
			use_include_tag = 1;

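		/*
		 * lookup_object() only finds objects that were already
		 * looked up while our refs were advertised (or marked), so
		 * an unknown name is rejected outright.
		 */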
		o = lookup_object(sha1_buf);
		if (!o)
			die("git upload-pack: not our ref %s",
			    sha1_to_hex(sha1_buf));
		if (!(o->flags & WANTED)) {
			o->flags |= WANTED;
			if (!(o->flags & OUR_REF))
				has_non_tip = 1;
			add_object_array(o, NULL, &want_obj);
		}
	}
	if (debug_fd)
		write_str_in_full(debug_fd, "#E\n");

	/*
	 * We have sent all our refs already, and the other end
	 * should have chosen out of them. When we are operating
	 * in the stateless RPC mode, however, their choice may
	 * have been based on the set of older refs advertised
	 * by another process that handled the initial request.
	 */
	if (has_non_tip)
		check_non_tip();

	if (!use_sideband && daemon_mode)
		no_progress = 1;

	if (depth == 0 && shallows.nr == 0)
		return;
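	/*
	 * The client asked for a limited depth: compute the new shallow
	 * boundary for the requested objects and announce each boundary
	 * commit with a "shallow" line.
	 */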
	if (depth > 0) {
		struct commit_list *result, *backup;
		int i;
		backup = result = get_shallow_commits(&want_obj, depth,
				SHALLOW, NOT_SHALLOW);
		while (result) {
			struct object *object = &result->item->object;
			if (!(object->flags & (CLIENT_SHALLOW|NOT_SHALLOW))) {
				packet_write(1, "shallow %s",
						sha1_to_hex(object->sha1));
				register_shallow(object->sha1);
				shallow_nr++;
			}
			result = result->next;
		}
		free_commit_list(backup);
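		/*
		 * Boundaries the client reported as shallow but that now
		 * fall within the requested depth are announced as
		 * "unshallow", and their real parents are added to the
		 * objects we must send.
		 */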
		for (i = 0; i < shallows.nr; i++) {
			struct object *object = shallows.objects[i].item;
			if (object->flags & NOT_SHALLOW) {
				struct commit_list *parents;
				packet_write(1, "unshallow %s",
					sha1_to_hex(object->sha1));
				object->flags &= ~CLIENT_SHALLOW;
				/* make sure the real parents are parsed */
				unregister_shallow(object->sha1);
				object->parsed = 0;
				if (parse_commit((struct commit *)object))
					die("invalid commit");
				parents = ((struct commit *)object)->parents;
				while (parents) {
					add_object_array(&parents->item->object,
							NULL, &want_obj);
					parents = parents->next;
				}
				add_object_array(object, NULL, &extra_edge_obj);
			}
			/* make sure commit traversal conforms to client */
			register_shallow(object->sha1);
		}
		packet_flush(1);
	} else
		if (shallows.nr > 0) {
			int i;
			for (i = 0; i < shallows.nr; i++)
				register_shallow(shallows.objects[i].item->sha1);
		}

	shallow_nr += shallows.nr;
	free(shallows.objects);
}

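/*
 * Advertise a single ref.  The first line sent also carries the server
 * capability list and agent string; if the ref points at an annotated
 * tag, its peeled value is advertised on a following "^{}" line.
 */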
static int send_ref(const char *refname, const unsigned char *sha1, int flag, void *cb_data)
{
	static const char *capabilities = "multi_ack thin-pack side-band"
		" side-band-64k ofs-delta shallow no-progress"
		" include-tag multi_ack_detailed";
	struct object *o = lookup_unknown_object(sha1);
	const char *refname_nons = strip_namespace(refname);
	unsigned char peeled[20];

	if (capabilities)
		packet_write(1, "%s %s%c%s%s agent=%s\n",
			     sha1_to_hex(sha1), refname_nons,
			     0, capabilities,
			     stateless_rpc ? " no-done" : "",
			     git_user_agent_sanitized());
	else
		packet_write(1, "%s %s\n", sha1_to_hex(sha1), refname_nons);
	capabilities = NULL;
	if (!(o->flags & OUR_REF)) {
		o->flags |= OUR_REF;
		nr_our_refs++;
	}
	if (!peel_ref(refname, peeled))
		packet_write(1, "%s %s^{}\n", sha1_to_hex(peeled), refname_nons);
	return 0;
}

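/*
 * In stateless RPC mode the ref advertisement was handled by an earlier
 * request, so we do not resend it; we only mark the current ref tips as
 * OUR_REF so that incoming "want" lines can be validated against them.
 */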
static int mark_our_ref(const char *refname, const unsigned char *sha1, int flag, void *cb_data)
{
	struct object *o = parse_object(sha1);
	if (!o)
		die("git upload-pack: cannot find object %s", sha1_to_hex(sha1));
	if (!(o->flags & OUR_REF)) {
		o->flags |= OUR_REF;
		nr_our_refs++;
	}
	return 0;
}

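/*
 * Drive one upload-pack session: advertise (or merely mark) our refs,
 * read the client's wants, find the common commits and stream the
 * resulting pack.
 */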
static void upload_pack(void)
{
	if (advertise_refs || !stateless_rpc) {
		reset_timeout();
		head_ref_namespaced(send_ref, NULL);
		for_each_namespaced_ref(send_ref, NULL);
		packet_flush(1);
	} else {
		head_ref_namespaced(mark_our_ref, NULL);
		for_each_namespaced_ref(mark_our_ref, NULL);
	}
	if (advertise_refs)
		return;

	receive_needs();
	if (want_obj.nr) {
		get_common_commits();
		create_pack_file();
	}
}

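/*
 * Entry point; see upload_pack_usage above for the accepted options.
 */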
int main(int argc, char **argv)
{
	char *dir;
	int i;
	int strict = 0;

	git_setup_gettext();

	packet_trace_identity("upload-pack");
	git_extract_argv0_path(argv[0]);
	read_replace_refs = 0;

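	/*
	 * Options must precede the repository path; parsing stops at the
	 * first non-option argument or at "--".
	 */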
	for (i = 1; i < argc; i++) {
		char *arg = argv[i];

		if (arg[0] != '-')
			break;
		if (!strcmp(arg, "--advertise-refs")) {
			advertise_refs = 1;
			continue;
		}
		if (!strcmp(arg, "--stateless-rpc")) {
			stateless_rpc = 1;
			continue;
		}
		if (!strcmp(arg, "--strict")) {
			strict = 1;
			continue;
		}
		if (!prefixcmp(arg, "--timeout=")) {
			timeout = atoi(arg+10);
			daemon_mode = 1;
			continue;
		}
		if (!strcmp(arg, "--")) {
			i++;
			break;
		}
	}

	if (i != argc-1)
		usage(upload_pack_usage);

	setup_path();

	dir = argv[i];

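	/*
	 * enter_repo() validates the path and chdirs into it; unless
	 * --strict was given it also tries the usual "<dir>/.git" and
	 * "<dir>.git" guesses.
	 */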
	if (!enter_repo(dir, strict))
		die("'%s' does not appear to be a git repository", dir);
	if (is_repository_shallow())
		die("attempt to fetch/clone from a shallow repository");
	if (getenv("GIT_DEBUG_SEND_PACK"))
		debug_fd = atoi(getenv("GIT_DEBUG_SEND_PACK"));
	upload_pack();
	return 0;
}