#include "git-compat-util.h"
#include "transport.h"
#include "quote.h"
#include "run-command.h"
#include "commit.h"
#include "environment.h"
#include "gettext.h"
#include "hex.h"
#include "object-name.h"
#include "repository.h"
#include "remote.h"
#include "string-list.h"
#include "thread-utils.h"
#include "sigchain.h"
#include "strvec.h"
#include "refs.h"
#include "refspec.h"
#include "transport-internal.h"
#include "protocol.h"
#include "packfile.h"

static int debug;

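/*
 * State for a single remote-helper process: the spawned child, the
 * capabilities it advertised, and the refspecs/options to use with it.
 */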
struct helper_data {
	const char *name;
	struct child_process *helper;
	FILE *out;
	unsigned fetch : 1,
		import : 1,
		bidi_import : 1,
		export : 1,
		option : 1,
		push : 1,
		connect : 1,
		stateless_connect : 1,
		signed_tags : 1,
		check_connectivity : 1,
		no_disconnect_req : 1,
		no_private_update : 1,
		object_format : 1;

	/*
	 * As an optimization, the transport code may invoke fetch before
	 * get_refs_list. If this happens, and if the transport helper doesn't
	 * support connect or stateless_connect, we need to invoke
	 * get_refs_list ourselves if we haven't already done so. Keep track of
	 * whether we have invoked get_refs_list.
	 */
	unsigned get_refs_list_called : 1;

	char *export_marks;
	char *import_marks;
	/* These go from remote name (as in "list") to private name */
	struct refspec rs;
	/* Transport options for fetch-pack/send-pack (should one of
	 * those be invoked).
	 */
	struct git_transport_options transport_options;
};

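/* Send one command line to the helper, dying if the write fails. */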
static void sendline(struct helper_data *helper, struct strbuf *buffer)
{
	if (debug)
		fprintf(stderr, "Debug: Remote helper: -> %s", buffer->buf);
	if (write_in_full(helper->helper->in, buffer->buf, buffer->len) < 0)
		die_errno(_("full write to remote helper failed"));
}

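/*
 * Read one line of helper output into the buffer; returns 1 if the
 * helper hung up (EOF), 0 otherwise.
 */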
static int recvline_fh(FILE *helper, struct strbuf *buffer)
{
	strbuf_reset(buffer);
	if (debug)
		fprintf(stderr, "Debug: Remote helper: Waiting...\n");
	if (strbuf_getline(buffer, helper) == EOF) {
		if (debug)
			fprintf(stderr, "Debug: Remote helper quit.\n");
		return 1;
	}

	if (debug)
		fprintf(stderr, "Debug: Remote helper: <- %s\n", buffer->buf);
	return 0;
}

static int recvline(struct helper_data *helper, struct strbuf *buffer)
{
	return recvline_fh(helper->out, buffer);
}

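/* Write a fixed command string (e.g. "capabilities\n") to the helper. */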
static void write_constant(int fd, const char *str)
{
	if (debug)
		fprintf(stderr, "Debug: Remote helper: -> %s", str);
	if (write_in_full(fd, str, strlen(str)) < 0)
		die_errno(_("full write to remote helper failed"));
}

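/*
 * For URLs of the form "<helper>::<address>", return only the address
 * part; otherwise return the URL unchanged.
 */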
static const char *remove_ext_force(const char *url)
{
	if (url) {
		const char *colon = strchr(url, ':');
		if (colon && colon[1] == ':')
			return colon + 2;
	}
	return url;
}

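/*
 * Hand the helper's connection over to the native transport code and
 * drop the helper-specific state.
 */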
static void do_take_over(struct transport *transport)
{
	struct helper_data *data;
	data = (struct helper_data *)transport->data;
	transport_take_over(transport, data->helper);
	fclose(data->out);
	free(data);
}

static void standard_options(struct transport *t);

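/*
 * Lazily start "git remote-<name>", send "capabilities", and record
 * which commands and options the helper supports.
 */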
static struct child_process *get_helper(struct transport *transport)
{
	struct helper_data *data = transport->data;
	struct strbuf buf = STRBUF_INIT;
	struct child_process *helper;
	int duped;
	int code;

	if (data->helper)
		return data->helper;

	helper = xmalloc(sizeof(*helper));
	child_process_init(helper);
	helper->in = -1;
	helper->out = -1;
	helper->err = 0;
	strvec_pushf(&helper->args, "remote-%s", data->name);
	strvec_push(&helper->args, transport->remote->name);
	strvec_push(&helper->args, remove_ext_force(transport->url));
	helper->git_cmd = 1;
	helper->silent_exec_failure = 1;

	if (have_git_dir())
		strvec_pushf(&helper->env, "%s=%s",
			     GIT_DIR_ENVIRONMENT, get_git_dir());

	helper->trace2_child_class = helper->args.v[0]; /* "remote-<name>" */

	code = start_command(helper);
	if (code < 0 && errno == ENOENT)
		die(_("unable to find remote helper for '%s'"), data->name);
	else if (code != 0)
		exit(code);

	data->helper = helper;
	data->no_disconnect_req = 0;
	refspec_init(&data->rs, REFSPEC_FETCH);

	/*
	 * Open the output as FILE* so strbuf_getline_*() family of
	 * functions can be used.
	 * Do this with duped fd because fclose() will close the fd,
	 * and stuff like taking over will require the fd to remain.
	 */
	duped = dup(helper->out);
	if (duped < 0)
		die_errno(_("can't dup helper output fd"));
	data->out = xfdopen(duped, "r");

	write_constant(helper->in, "capabilities\n");

	while (1) {
		const char *capname, *arg;
		int mandatory = 0;
		if (recvline(data, &buf))
			exit(128);

		if (!*buf.buf)
			break;

		if (*buf.buf == '*') {
			capname = buf.buf + 1;
			mandatory = 1;
		} else
			capname = buf.buf;

		if (debug)
			fprintf(stderr, "Debug: Got cap %s\n", capname);
		if (!strcmp(capname, "fetch"))
			data->fetch = 1;
		else if (!strcmp(capname, "option"))
			data->option = 1;
		else if (!strcmp(capname, "push"))
			data->push = 1;
		else if (!strcmp(capname, "import"))
			data->import = 1;
		else if (!strcmp(capname, "bidi-import"))
			data->bidi_import = 1;
		else if (!strcmp(capname, "export"))
			data->export = 1;
		else if (!strcmp(capname, "check-connectivity"))
			data->check_connectivity = 1;
		else if (skip_prefix(capname, "refspec ", &arg)) {
			refspec_append(&data->rs, arg);
		} else if (!strcmp(capname, "connect")) {
			data->connect = 1;
		} else if (!strcmp(capname, "stateless-connect")) {
			data->stateless_connect = 1;
		} else if (!strcmp(capname, "signed-tags")) {
			data->signed_tags = 1;
		} else if (skip_prefix(capname, "export-marks ", &arg)) {
			data->export_marks = xstrdup(arg);
		} else if (skip_prefix(capname, "import-marks ", &arg)) {
			data->import_marks = xstrdup(arg);
		} else if (starts_with(capname, "no-private-update")) {
			data->no_private_update = 1;
		} else if (starts_with(capname, "object-format")) {
			data->object_format = 1;
		} else if (mandatory) {
			die(_("unknown mandatory capability %s; this remote "
			      "helper probably needs newer version of Git"),
			    capname);
		}
	}
	if (!data->rs.nr && (data->import || data->bidi_import || data->export)) {
		warning(_("this remote helper should implement refspec capability"));
	}
	strbuf_release(&buf);
	if (debug)
		fprintf(stderr, "Debug: Capabilities complete.\n");
	standard_options(transport);
	return data->helper;
}

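/* Tell the helper we are done (a blank line), then shut it down. */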
static int disconnect_helper(struct transport *transport)
{
	struct helper_data *data = transport->data;
	int res = 0;

	if (data->helper) {
		if (debug)
			fprintf(stderr, "Debug: Disconnecting.\n");
		if (!data->no_disconnect_req) {
			/*
			 * Ignore write errors; there's nothing we can do,
			 * since we're about to close the pipe anyway. And the
			 * most likely error is EPIPE due to the helper dying
			 * to report an error itself.
			 */
			sigchain_push(SIGPIPE, SIG_IGN);
			xwrite(data->helper->in, "\n", 1);
			sigchain_pop(SIGPIPE);
		}
		close(data->helper->in);
		close(data->helper->out);
		fclose(data->out);
		res = finish_command(data->helper);
		FREE_AND_NULL(data->helper);
	}
	return res;
}

static const char *unsupported_options[] = {
	TRANS_OPT_UPLOADPACK,
	TRANS_OPT_RECEIVEPACK,
	TRANS_OPT_THIN,
	TRANS_OPT_KEEP
};

static const char *boolean_options[] = {
	TRANS_OPT_THIN,
	TRANS_OPT_KEEP,
	TRANS_OPT_FOLLOWTAGS,
	TRANS_OPT_DEEPEN_RELATIVE
};

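/*
 * Send a single "option" command and map the helper's reply:
 * 0 for "ok", -1 for "error", 1 for "unsupported" or anything else.
 */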
static int strbuf_set_helper_option(struct helper_data *data,
				    struct strbuf *buf)
{
	int ret;

	sendline(data, buf);
	if (recvline(data, buf))
		exit(128);

	if (!strcmp(buf->buf, "ok"))
		ret = 0;
	else if (starts_with(buf->buf, "error"))
		ret = -1;
	else if (!strcmp(buf->buf, "unsupported"))
		ret = 1;
	else {
		warning(_("%s unexpectedly said: '%s'"), data->name, buf->buf);
		ret = 1;
	}
	return ret;
}

static int string_list_set_helper_option(struct helper_data *data,
					 const char *name,
					 struct string_list *list)
{
	struct strbuf buf = STRBUF_INIT;
	int i, ret = 0;

	for (i = 0; i < list->nr; i++) {
		strbuf_addf(&buf, "option %s ", name);
		quote_c_style(list->items[i].string, &buf, NULL, 0);
		strbuf_addch(&buf, '\n');

		if ((ret = strbuf_set_helper_option(data, &buf)))
			break;
		strbuf_reset(&buf);
	}
	strbuf_release(&buf);
	return ret;
}

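/*
 * Forward one transport option to the helper; returns 1 for options the
 * helper does not take, -1 on error, 0 on success.
 */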
static int set_helper_option(struct transport *transport,
			     const char *name, const char *value)
{
	struct helper_data *data = transport->data;
	struct strbuf buf = STRBUF_INIT;
	int i, ret, is_bool = 0;

	get_helper(transport);

	if (!data->option)
		return 1;

	if (!strcmp(name, "deepen-not"))
		return string_list_set_helper_option(data, name,
						     (struct string_list *)value);

	for (i = 0; i < ARRAY_SIZE(unsupported_options); i++) {
		if (!strcmp(name, unsupported_options[i]))
			return 1;
	}

	for (i = 0; i < ARRAY_SIZE(boolean_options); i++) {
		if (!strcmp(name, boolean_options[i])) {
			is_bool = 1;
			break;
		}
	}

	strbuf_addf(&buf, "option %s ", name);
	if (is_bool)
		strbuf_addstr(&buf, value ? "true" : "false");
	else
		quote_c_style(value, &buf, NULL, 0);
	strbuf_addch(&buf, '\n');

	ret = strbuf_set_helper_option(data, &buf);
	strbuf_release(&buf);
	return ret;
}

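/* Pass the options every helper should see: progress, verbosity, family. */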
static void standard_options(struct transport *t)
{
	char buf[16];
	int v = t->verbose;

	set_helper_option(t, "progress", t->progress ? "true" : "false");

	xsnprintf(buf, sizeof(buf), "%d", v + 1);
	set_helper_option(t, "verbosity", buf);

	switch (t->family) {
	case TRANSPORT_FAMILY_ALL:
		/*
		 * this is already the default,
		 * do not break old remote helpers by setting "all" here
		 */
		break;
	case TRANSPORT_FAMILY_IPV4:
		set_helper_option(t, "family", "ipv4");
		break;
	case TRANSPORT_FAMILY_IPV6:
		set_helper_option(t, "family", "ipv6");
		break;
	}
}

static int release_helper(struct transport *transport)
{
	int res = 0;
	struct helper_data *data = transport->data;
	refspec_clear(&data->rs);
	res = disconnect_helper(transport);
	free(transport->data);
	return res;
}

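/*
 * Fetch refs via the helper's "fetch" command, recording any pack
 * lockfiles and connectivity information it reports back.
 */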
static int fetch_with_fetch(struct transport *transport,
			    int nr_heads, struct ref **to_fetch)
{
	struct helper_data *data = transport->data;
	int i;
	struct strbuf buf = STRBUF_INIT;

	for (i = 0; i < nr_heads; i++) {
		const struct ref *posn = to_fetch[i];
		if (posn->status & REF_STATUS_UPTODATE)
			continue;

		strbuf_addf(&buf, "fetch %s %s\n",
			    oid_to_hex(&posn->old_oid),
			    posn->symref ? posn->symref : posn->name);
	}

	strbuf_addch(&buf, '\n');
	sendline(data, &buf);

	while (1) {
		const char *name;

		if (recvline(data, &buf))
			exit(128);

		if (skip_prefix(buf.buf, "lock ", &name)) {
			if (transport->pack_lockfiles.nr)
				warning(_("%s also locked %s"), data->name, name);
			else
				string_list_append(&transport->pack_lockfiles,
						   name);
		}
		else if (data->check_connectivity &&
			 data->transport_options.check_self_contained_and_connected &&
			 !strcmp(buf.buf, "connectivity-ok"))
			data->transport_options.self_contained_and_connected = 1;
		else if (!buf.len)
			break;
		else
			warning(_("%s unexpectedly said: '%s'"), data->name, buf.buf);
	}
	strbuf_release(&buf);

	reprepare_packed_git(the_repository);
	return 0;
}

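/* Spawn the fast-import process that consumes the helper's "import" stream. */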
static int get_importer(struct transport *transport, struct child_process *fastimport)
{
	struct child_process *helper = get_helper(transport);
	struct helper_data *data = transport->data;
	int cat_blob_fd, code;
	child_process_init(fastimport);
	fastimport->in = xdup(helper->out);
	strvec_push(&fastimport->args, "fast-import");
	strvec_push(&fastimport->args, "--allow-unsafe-features");
	strvec_push(&fastimport->args, debug ? "--stats" : "--quiet");

	if (data->bidi_import) {
		cat_blob_fd = xdup(helper->in);
		strvec_pushf(&fastimport->args, "--cat-blob-fd=%d", cat_blob_fd);
	}
	fastimport->git_cmd = 1;

	code = start_command(fastimport);
	return code;
}

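/* Spawn fast-export, feeding the helper's stdin, for the "export" push path. */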
static int get_exporter(struct transport *transport,
			struct child_process *fastexport,
			struct string_list *revlist_args)
{
	struct helper_data *data = transport->data;
	struct child_process *helper = get_helper(transport);
	int i;

	child_process_init(fastexport);

	/* we need to duplicate helper->in because we want to use it after
	 * fastexport is done with it. */
	fastexport->out = dup(helper->in);
	strvec_push(&fastexport->args, "fast-export");
	strvec_push(&fastexport->args, "--use-done-feature");
	strvec_push(&fastexport->args, data->signed_tags ?
		    "--signed-tags=verbatim" : "--signed-tags=warn-strip");
	if (data->export_marks)
		strvec_pushf(&fastexport->args, "--export-marks=%s.tmp", data->export_marks);
	if (data->import_marks)
		strvec_pushf(&fastexport->args, "--import-marks=%s", data->import_marks);

	for (i = 0; i < revlist_args->nr; i++)
		strvec_push(&fastexport->args, revlist_args->items[i].string);

	fastexport->git_cmd = 1;
	return start_command(fastexport);
}

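/*
 * Fetch refs via the helper's "import" command: run the stream through
 * fast-import, then read the resulting (private) refs to learn the
 * fetched object names.
 */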
static int fetch_with_import(struct transport *transport,
			     int nr_heads, struct ref **to_fetch)
{
	struct child_process fastimport;
	struct helper_data *data = transport->data;
	int i;
	struct ref *posn;
	struct strbuf buf = STRBUF_INIT;

	get_helper(transport);

	if (get_importer(transport, &fastimport))
		die(_("couldn't run fast-import"));

	for (i = 0; i < nr_heads; i++) {
		posn = to_fetch[i];
		if (posn->status & REF_STATUS_UPTODATE)
			continue;

		strbuf_addf(&buf, "import %s\n",
			    posn->symref ? posn->symref : posn->name);
		sendline(data, &buf);
		strbuf_reset(&buf);
	}

	write_constant(data->helper->in, "\n");
	/*
	 * remote-helpers that advertise the bidi-import capability are required to
	 * buffer the complete batch of import commands until this newline before
	 * sending data to fast-import.
	 * These helpers read back data from fast-import on their stdin, which could
	 * be mixed with import commands, otherwise.
	 */

	if (finish_command(&fastimport))
		die(_("error while running fast-import"));

	/*
	 * The fast-import stream of a remote helper that advertises
	 * the "refspec" capability writes to the refs named after the
	 * right hand side of the first refspec matching each ref we
	 * were fetching.
	 *
	 * (If no "refspec" capability was specified, for historical
	 * reasons we default to the equivalent of *:*.)
	 *
	 * Store the result in to_fetch[i].old_sha1. Callers such
	 * as "git fetch" can use the value to write feedback to the
	 * terminal, populate FETCH_HEAD, and determine what new value
	 * should be written to peer_ref if the update is a
	 * fast-forward or this is a forced update.
	 */
	for (i = 0; i < nr_heads; i++) {
		char *private, *name;
		posn = to_fetch[i];
		if (posn->status & REF_STATUS_UPTODATE)
			continue;
name = posn->symref ? posn->symref : posn->name;
|
2018-05-16 22:58:03 +00:00
|
|
|
if (data->rs.nr)
|
2018-05-16 22:58:11 +00:00
|
|
|
private = apply_refspecs(&data->rs, name);
|
2009-11-18 01:42:28 +00:00
|
|
|
else
|
transport-helper: do not request symbolic refs to remote helpers
A typical remote helper will return a `list` of refs containing a symbolic
ref HEAD, pointing to, e.g. refs/heads/master. In the case of a clone, all
the refs are being requested through `fetch` or `import`, including the
symbolic ref.
While this works properly, in some cases of a fetch, like `git fetch url`
or `git fetch origin HEAD`, or any fetch command involving a symbolic ref
without also fetching the corresponding ref it points to, the fetch command
fails with:
fatal: bad object 0000000000000000000000000000000000000000
error: <remote> did not send all necessary objects
(in the case the remote helper returned '?' values to the `list` command).
This is because there is only one ref given to fetch(), and it's not
further resolved to something at the end of fetch_with_import().
While this can be somehow handled in the remote helper itself, by adding
a refspec for the symbolic ref, and storing an explicit ref in a private
namespace, and then handling the `import` for that symbolic ref
specifically, very few existing remote helpers are actually doing that.
So, instead of requesting the exact list of wanted refs to remote helpers,
treat symbolic refs differently and request the ref they point to instead.
Then, resolve the symbolic refs values based on the pointed ref.
This assumes there is no more than one level of indirection (a symbolic
ref doesn't point to another symbolic ref).
Signed-off-by: Mike Hommey <mh@glandium.org>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-01-19 01:35:07 +00:00
|
|
|
private = xstrdup(name);
|
2011-09-15 21:10:38 +00:00
|
|
|
if (private) {
|
2017-10-15 22:06:56 +00:00
|
|
|
if (read_ref(private, &posn->old_oid) < 0)
|
2018-07-21 07:49:41 +00:00
|
|
|
die(_("could not read ref %s"), private);
|
2011-09-15 21:10:38 +00:00
|
|
|
free(private);
|
|
|
|
}
|
2009-11-18 01:42:27 +00:00
|
|
|
}
|
2009-11-18 01:42:29 +00:00
|
|
|
strbuf_release(&buf);
|
2009-11-18 01:42:27 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-03-15 17:31:33 +00:00
|
|
|
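/*
 * Send the given "connect"/"stateless-connect" command to the helper and
 * interpret its reply: an empty line means the (stateless) connection is
 * established, "fallback" means we must keep using the dumb helper
 * commands. Returns 1 if connected, 0 otherwise.
 */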
static int run_connect(struct transport *transport, struct strbuf *cmdbuf)
{
	struct helper_data *data = transport->data;
	int ret = 0;
	int duped;
	FILE *input;
	struct child_process *helper;

	helper = get_helper(transport);

	/*
	 * Yes, dup the pipe another time, as we need unbuffered version
	 * of input pipe as FILE*. fclose() closes the underlying fd and
	 * stream buffering only can be changed before first I/O operation
	 * on it.
	 */
	duped = dup(helper->out);
	if (duped < 0)
		die_errno(_("can't dup helper output fd"));
	input = xfdopen(duped, "r");
	setvbuf(input, NULL, _IONBF, 0);

	sendline(data, cmdbuf);
	if (recvline_fh(input, cmdbuf))
		exit(128);

	if (!strcmp(cmdbuf->buf, "")) {
		data->no_disconnect_req = 1;
		if (debug)
			fprintf(stderr, "Debug: Smart transport connection "
				"ready.\n");
		ret = 1;
	} else if (!strcmp(cmdbuf->buf, "fallback")) {
		if (debug)
			fprintf(stderr, "Debug: Falling back to dumb "
				"transport.\n");
	} else {
		die(_("unknown response to connect: %s"),
		    cmdbuf->buf);
	}

	fclose(input);
	return ret;
}

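/*
 * Ask the helper to connect us to the named remote service (e.g.
 * "git-upload-pack"), using "connect" if advertised, or
 * "stateless-connect" for protocol v2. Returns nonzero on success.
 */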
static int process_connect_service(struct transport *transport,
				   const char *name, const char *exec)
{
	struct helper_data *data = transport->data;
	struct strbuf cmdbuf = STRBUF_INIT;
	int ret = 0;

	/*
	 * Handle --upload-pack and friends. This is fire and forget...
	 * just warn if it fails.
	 */
	if (strcmp(name, exec)) {
		int r = set_helper_option(transport, "servpath", exec);
		if (r > 0)
			warning(_("setting remote service path not supported by protocol"));
		else if (r < 0)
			warning(_("invalid remote service path"));
	}

	if (data->connect) {
		strbuf_addf(&cmdbuf, "connect %s\n", name);
		ret = run_connect(transport, &cmdbuf);
	} else if (data->stateless_connect &&
		   (get_protocol_version_config() == protocol_v2) &&
		   (!strcmp("git-upload-pack", name) ||
		    !strcmp("git-upload-archive", name))) {
		strbuf_addf(&cmdbuf, "stateless-connect %s\n", name);
		ret = run_connect(transport, &cmdbuf);
		if (ret)
			transport->stateless_rpc = 1;
	}

	strbuf_release(&cmdbuf);
	return ret;
}

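/*
 * Try to establish a (stateless-)connection for fetching or pushing;
 * on success the helper transport is taken over by the smart vtable.
 */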
static int process_connect(struct transport *transport,
			   int for_push)
{
	struct helper_data *data = transport->data;
	const char *name;
	const char *exec;
	int ret;

	name = for_push ? "git-receive-pack" : "git-upload-pack";
	if (for_push)
		exec = data->transport_options.receivepack;
	else
		exec = data->transport_options.uploadpack;

	ret = process_connect_service(transport, name, exec);
	if (ret)
		do_take_over(transport);
	return ret;
}

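/*
 * Connect to the requested subservice via the helper and hand its
 * stdin/stdout file descriptors back to the caller.
 */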
static int connect_helper(struct transport *transport, const char *name,
			  const char *exec, int fd[2])
{
	struct helper_data *data = transport->data;

	/* Get_helper so connect is inited. */
	get_helper(transport);

	if (!process_connect_service(transport, name, exec))
		die(_("can't connect to subservice %s"), name);

	fd[0] = data->helper->out;
	fd[1] = data->helper->in;

	do_take_over(transport);
	return 0;
}

static struct ref *get_refs_list_using_list(struct transport *transport,
					    int for_push);

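/*
 * Fetch the requested refs, either through a taken-over smart connection
 * or via the helper's "fetch"/"import" commands.
 */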
static int fetch_refs(struct transport *transport,
		      int nr_heads, struct ref **to_fetch)
{
	struct helper_data *data = transport->data;
	int i, count;

	get_helper(transport);

	if (process_connect(transport, 0))
		return transport->vtable->fetch_refs(transport, nr_heads, to_fetch);

	/*
	 * If we reach here, then the server, the client, and/or the transport
	 * helper does not support protocol v2. --negotiate-only requires
	 * protocol v2.
	 */
	if (data->transport_options.acked_commits) {
		warning(_("--negotiate-only requires protocol v2"));
		return -1;
	}

	if (!data->get_refs_list_called)
		get_refs_list_using_list(transport, 0);

	count = 0;
	for (i = 0; i < nr_heads; i++)
		if (!(to_fetch[i]->status & REF_STATUS_UPTODATE))
			count++;

	if (!count)
		return 0;

	if (data->check_connectivity &&
	    data->transport_options.check_self_contained_and_connected)
		set_helper_option(transport, "check-connectivity", "true");

	if (transport->cloning)
		set_helper_option(transport, "cloning", "true");

	if (data->transport_options.update_shallow)
		set_helper_option(transport, "update-shallow", "true");

	if (data->transport_options.refetch)
		set_helper_option(transport, "refetch", "true");

	if (data->transport_options.filter_options.choice) {
		const char *spec = expand_list_objects_filter_spec(
			&data->transport_options.filter_options);
		set_helper_option(transport, "filter", spec);
	}

	if (data->transport_options.negotiation_tips)
		warning("Ignoring --negotiation-tip because the protocol does not support it.");

	if (data->fetch)
		return fetch_with_fetch(transport, nr_heads, to_fetch);

	if (data->import)
		return fetch_with_import(transport, nr_heads, to_fetch);

	return -1;
}

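/* State kept while parsing the per-ref status report of a push. */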
struct push_update_ref_state {
	struct ref *hint;
	struct ref_push_report *report;
	int new_report;
};

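/*
 * Parse a single "ok"/"error"/"option" status line from the helper and
 * record the result on the matching ref. Returns 0 only for a
 * successfully updated ref.
 */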
static int push_update_ref_status(struct strbuf *buf,
				  struct push_update_ref_state *state,
				  struct ref *remote_refs)
{
	char *refname, *msg;
	int status, forced = 0;

	if (starts_with(buf->buf, "option ")) {
		struct object_id old_oid, new_oid;
		const char *key, *val;
		char *p;

		if (!state->hint || !(state->report || state->new_report))
			die(_("'option' without a matching 'ok/error' directive"));
		if (state->new_report) {
			if (!state->hint->report) {
				CALLOC_ARRAY(state->hint->report, 1);
				state->report = state->hint->report;
			} else {
				state->report = state->hint->report;
				while (state->report->next)
					state->report = state->report->next;
				CALLOC_ARRAY(state->report->next, 1);
				state->report = state->report->next;
			}
			state->new_report = 0;
		}
		key = buf->buf + 7;
		p = strchr(key, ' ');
		if (p)
			*p++ = '\0';
		val = p;
		if (!strcmp(key, "refname"))
			state->report->ref_name = xstrdup_or_null(val);
		else if (!strcmp(key, "old-oid") && val &&
			 !parse_oid_hex(val, &old_oid, &val))
			state->report->old_oid = oiddup(&old_oid);
		else if (!strcmp(key, "new-oid") && val &&
			 !parse_oid_hex(val, &new_oid, &val))
			state->report->new_oid = oiddup(&new_oid);
		else if (!strcmp(key, "forced-update"))
			state->report->forced_update = 1;
		/* Not update remote namespace again. */
		return 1;
	}

	state->report = NULL;
	state->new_report = 0;

	if (starts_with(buf->buf, "ok ")) {
		status = REF_STATUS_OK;
		refname = buf->buf + 3;
	} else if (starts_with(buf->buf, "error ")) {
		status = REF_STATUS_REMOTE_REJECT;
		refname = buf->buf + 6;
	} else
		die(_("expected ok/error, helper said '%s'"), buf->buf);

	msg = strchr(refname, ' ');
	if (msg) {
		struct strbuf msg_buf = STRBUF_INIT;
		const char *end;

		*msg++ = '\0';
		if (!unquote_c_style(&msg_buf, msg, &end))
			msg = strbuf_detach(&msg_buf, NULL);
		else
			msg = xstrdup(msg);
		strbuf_release(&msg_buf);

		if (!strcmp(msg, "no match")) {
			status = REF_STATUS_NONE;
			FREE_AND_NULL(msg);
		}
		else if (!strcmp(msg, "up to date")) {
			status = REF_STATUS_UPTODATE;
			FREE_AND_NULL(msg);
		}
		else if (!strcmp(msg, "non-fast forward")) {
			status = REF_STATUS_REJECT_NONFASTFORWARD;
			FREE_AND_NULL(msg);
		}
		else if (!strcmp(msg, "already exists")) {
			status = REF_STATUS_REJECT_ALREADY_EXISTS;
			FREE_AND_NULL(msg);
		}
		else if (!strcmp(msg, "fetch first")) {
			status = REF_STATUS_REJECT_FETCH_FIRST;
			FREE_AND_NULL(msg);
		}
		else if (!strcmp(msg, "needs force")) {
			status = REF_STATUS_REJECT_NEEDS_FORCE;
			FREE_AND_NULL(msg);
		}
		else if (!strcmp(msg, "stale info")) {
			status = REF_STATUS_REJECT_STALE;
			FREE_AND_NULL(msg);
		}
		else if (!strcmp(msg, "remote ref updated since checkout")) {
			status = REF_STATUS_REJECT_REMOTE_UPDATED;
			FREE_AND_NULL(msg);
		}
		else if (!strcmp(msg, "forced update")) {
			forced = 1;
			FREE_AND_NULL(msg);
		}
		else if (!strcmp(msg, "expecting report")) {
			status = REF_STATUS_EXPECTING_REPORT;
			FREE_AND_NULL(msg);
		}
	}

	if (state->hint)
		state->hint = find_ref_by_name(state->hint, refname);
	if (!state->hint)
		state->hint = find_ref_by_name(remote_refs, refname);
	if (!state->hint) {
		warning(_("helper reported unexpected status of %s"), refname);
		return 1;
	}

	if (state->hint->status != REF_STATUS_NONE) {
		/*
		 * Earlier, the ref was marked not to be pushed, so ignore the ref
		 * status reported by the remote helper if the latter is 'no match'.
		 */
		if (status == REF_STATUS_NONE)
			return 1;
	}

	if (status == REF_STATUS_OK)
		state->new_report = 1;
	state->hint->status = status;
	state->hint->forced_update |= forced;
	state->hint->remote_status = msg;
	return !(status == REF_STATUS_OK);
}

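/*
 * Read the helper's status report up to the terminating blank line and,
 * unless this is a dry run or no private namespace is configured,
 * propagate successful updates back into the private (remote-tracking)
 * namespace.
 */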
static int push_update_refs_status(struct helper_data *data,
				   struct ref *remote_refs,
				   int flags)
{
	struct ref *ref;
	struct ref_push_report *report;
	struct strbuf buf = STRBUF_INIT;
	struct push_update_ref_state state = { remote_refs, NULL, 0 };

	for (;;) {
		if (recvline(data, &buf)) {
			strbuf_release(&buf);
			return 1;
		}
		if (!buf.len)
			break;
		push_update_ref_status(&buf, &state, remote_refs);
	}
	strbuf_release(&buf);

	if (flags & TRANSPORT_PUSH_DRY_RUN || !data->rs.nr || data->no_private_update)
		return 0;

	/* propagate back the update to the remote namespace */
	for (ref = remote_refs; ref; ref = ref->next) {
		char *private;

		if (ref->status != REF_STATUS_OK)
			continue;

		if (!ref->report) {
			private = apply_refspecs(&data->rs, ref->name);
			if (!private)
				continue;
			update_ref("update by helper", private, &(ref->new_oid),
				   NULL, 0, 0);
			free(private);
		} else {
			for (report = ref->report; report; report = report->next) {
				private = apply_refspecs(&data->rs,
							 report->ref_name
							 ? report->ref_name
							 : ref->name);
				if (!private)
					continue;
				update_ref("update by helper", private,
					   report->new_oid
					   ? report->new_oid
					   : &(ref->new_oid),
					   NULL, 0, 0);
				free(private);
			}
		}
	}
	return 0;
}

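/*
 * Forward push options shared by the "push" and "export" code paths
 * (dry-run, signed push, atomic, force-if-includes, push-option).
 */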
static void set_common_push_options(struct transport *transport,
				    const char *name, int flags)
{
	if (flags & TRANSPORT_PUSH_DRY_RUN) {
		if (set_helper_option(transport, "dry-run", "true") != 0)
			die(_("helper %s does not support dry-run"), name);
	} else if (flags & TRANSPORT_PUSH_CERT_ALWAYS) {
		if (set_helper_option(transport, TRANS_OPT_PUSH_CERT, "true") != 0)
			die(_("helper %s does not support --signed"), name);
	} else if (flags & TRANSPORT_PUSH_CERT_IF_ASKED) {
		if (set_helper_option(transport, TRANS_OPT_PUSH_CERT, "if-asked") != 0)
			die(_("helper %s does not support --signed=if-asked"), name);
	}

	if (flags & TRANSPORT_PUSH_ATOMIC)
		if (set_helper_option(transport, TRANS_OPT_ATOMIC, "true") != 0)
			die(_("helper %s does not support --atomic"), name);

	if (flags & TRANSPORT_PUSH_FORCE_IF_INCLUDES)
		if (set_helper_option(transport, TRANS_OPT_FORCE_IF_INCLUDES, "true") != 0)
			die(_("helper %s does not support --%s"),
			    name, TRANS_OPT_FORCE_IF_INCLUDES);

	if (flags & TRANSPORT_PUSH_OPTIONS) {
		struct string_list_item *item;
		for_each_string_list_item(item, transport->push_options)
			if (set_helper_option(transport, "push-option", item->string) != 0)
				die(_("helper %s does not support 'push-option'"), name);
	}
}

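/*
 * Push via a helper that advertises "push": send a batch of
 * "push <src>:<dst>" commands and collect the per-ref status report.
 */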
static int push_refs_with_push(struct transport *transport,
			       struct ref *remote_refs, int flags)
{
	int force_all = flags & TRANSPORT_PUSH_FORCE;
	int mirror = flags & TRANSPORT_PUSH_MIRROR;
	int atomic = flags & TRANSPORT_PUSH_ATOMIC;
	struct helper_data *data = transport->data;
	struct strbuf buf = STRBUF_INIT;
	struct ref *ref;
	struct string_list cas_options = STRING_LIST_INIT_DUP;
	struct string_list_item *cas_option;

	get_helper(transport);
	if (!data->push)
		return 1;

	for (ref = remote_refs; ref; ref = ref->next) {
		if (!ref->peer_ref && !mirror)
			continue;

		/* Check for statuses set by set_ref_status_for_push() */
		switch (ref->status) {
		case REF_STATUS_REJECT_NONFASTFORWARD:
		case REF_STATUS_REJECT_STALE:
		case REF_STATUS_REJECT_ALREADY_EXISTS:
		case REF_STATUS_REJECT_REMOTE_UPDATED:
			if (atomic) {
				reject_atomic_push(remote_refs, mirror);
				string_list_clear(&cas_options, 0);
				return 0;
			} else
				continue;
		case REF_STATUS_UPTODATE:
			continue;
		default:
			; /* do nothing */
		}

		if (force_all)
			ref->force = 1;

		strbuf_addstr(&buf, "push ");
		if (!ref->deletion) {
			if (ref->force)
				strbuf_addch(&buf, '+');
			if (ref->peer_ref)
				strbuf_addstr(&buf, ref->peer_ref->name);
			else
				strbuf_addstr(&buf, oid_to_hex(&ref->new_oid));
		}
		strbuf_addch(&buf, ':');
		strbuf_addstr(&buf, ref->name);
		strbuf_addch(&buf, '\n');

		/*
		 * The "--force-with-lease" options without explicit
		 * values to expect have already been expanded into
		 * the ref->old_oid_expect[] field; we can ignore
		 * transport->smart_options->cas altogether and instead
		 * can enumerate them from the refs.
		 */
		if (ref->expect_old_sha1) {
			struct strbuf cas = STRBUF_INIT;
			strbuf_addf(&cas, "%s:%s",
				    ref->name, oid_to_hex(&ref->old_oid_expect));
			string_list_append_nodup(&cas_options,
						 strbuf_detach(&cas, NULL));
		}
	}
	if (buf.len == 0) {
		string_list_clear(&cas_options, 0);
		return 0;
	}

	for_each_string_list_item(cas_option, &cas_options)
		set_helper_option(transport, "cas", cas_option->string);
	set_common_push_options(transport, data->name, flags);

	strbuf_addch(&buf, '\n');
	sendline(data, &buf);
	strbuf_release(&buf);
	string_list_clear(&cas_options, 0);

	return push_update_refs_status(data, remote_refs, flags);
}

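/*
 * Push via a helper that advertises "export": feed it a fast-export
 * stream of the refs to be pushed.
 */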
static int push_refs_with_export(struct transport *transport,
				 struct ref *remote_refs, int flags)
{
	struct ref *ref;
	struct child_process *helper, exporter;
	struct helper_data *data = transport->data;
	struct string_list revlist_args = STRING_LIST_INIT_DUP;
	struct strbuf buf = STRBUF_INIT;

	if (!data->rs.nr)
		die(_("remote-helper doesn't support push; refspec needed"));

	set_common_push_options(transport, data->name, flags);
	if (flags & TRANSPORT_PUSH_FORCE) {
		if (set_helper_option(transport, "force", "true") != 0)
			warning(_("helper %s does not support 'force'"), data->name);
	}

	helper = get_helper(transport);

	write_constant(helper->in, "export\n");

	for (ref = remote_refs; ref; ref = ref->next) {
		char *private;
		struct object_id oid;

		private = apply_refspecs(&data->rs, ref->name);
		if (private && !repo_get_oid(the_repository, private, &oid)) {
			strbuf_addf(&buf, "^%s", private);
			string_list_append_nodup(&revlist_args,
						 strbuf_detach(&buf, NULL));
			oidcpy(&ref->old_oid, &oid);
		}
		free(private);

		if (ref->peer_ref) {
			if (strcmp(ref->name, ref->peer_ref->name)) {
				if (!ref->deletion) {
					const char *name;
					int flag;

					/* Follow symbolic refs (mainly for HEAD). */
					name = resolve_ref_unsafe(ref->peer_ref->name,
								  RESOLVE_REF_READING,
								  &oid, &flag);
					if (!name || !(flag & REF_ISSYMREF))
						name = ref->peer_ref->name;

					strbuf_addf(&buf, "%s:%s", name, ref->name);
				} else
					strbuf_addf(&buf, ":%s", ref->name);

				string_list_append(&revlist_args, "--refspec");
				string_list_append(&revlist_args, buf.buf);
				strbuf_release(&buf);
			}
			if (!ref->deletion)
				string_list_append(&revlist_args, ref->peer_ref->name);
		}
	}

	if (get_exporter(transport, &exporter, &revlist_args))
		die(_("couldn't run fast-export"));

	string_list_clear(&revlist_args, 1);

	if (finish_command(&exporter))
		die(_("error while running fast-export"));
	if (push_update_refs_status(data, remote_refs, flags))
		return 1;

	if (data->export_marks) {
		strbuf_addf(&buf, "%s.tmp", data->export_marks);
		rename(buf.buf, data->export_marks);
		strbuf_release(&buf);
	}

	return 0;
}

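/*
 * Push entry point: take the connection over if possible, otherwise
 * dispatch to the "push" or "export" code path.
 */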
static int push_refs(struct transport *transport,
		     struct ref *remote_refs, int flags)
{
	struct helper_data *data = transport->data;

	if (process_connect(transport, 1))
		return transport->vtable->push_refs(transport, remote_refs, flags);

	if (!remote_refs) {
		fprintf(stderr,
			_("No refs in common and none specified; doing nothing.\n"
			  "Perhaps you should specify a branch.\n"));
		return 0;
	}

	if (data->push)
		return push_refs_with_push(transport, remote_refs, flags);

	if (data->export)
		return push_refs_with_export(transport, remote_refs, flags);

	return -1;
}

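/* Check whether the space-separated attribute list contains the given attribute. */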
static int has_attribute(const char *attrs, const char *attr)
{
	int len;
	if (!attrs)
		return 0;

	len = strlen(attr);
	for (;;) {
		const char *space = strchrnul(attrs, ' ');
		if (len == space - attrs && !strncmp(attrs, attr, len))
			return 1;
		if (!*space)
			return 0;
		attrs = space + 1;
	}
}

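/*
 * List the remote refs, using the native ref advertisement when the
 * helper lets us take the connection over, and the "list" command
 * otherwise.
 */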
static struct ref *get_refs_list(struct transport *transport, int for_push,
				 struct transport_ls_refs_options *transport_options)
{
	get_helper(transport);

	if (process_connect(transport, for_push))
		return transport->vtable->get_refs_list(transport, for_push,
							transport_options);

	return get_refs_list_using_list(transport, for_push);
}

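/*
 * Read the output of the helper's "list" (or "list for-push") command,
 * one "<value> <name> [<attr>...]" line per ref.
 */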
static struct ref *get_refs_list_using_list(struct transport *transport,
					    int for_push)
{
	struct helper_data *data = transport->data;
	struct child_process *helper;
	struct ref *ret = NULL;
	struct ref **tail = &ret;
	struct ref *posn;
	struct strbuf buf = STRBUF_INIT;

	data->get_refs_list_called = 1;
	helper = get_helper(transport);

	if (data->object_format) {
		write_str_in_full(helper->in, "option object-format\n");
		if (recvline(data, &buf) || strcmp(buf.buf, "ok"))
			exit(128);
	}

	if (data->push && for_push)
		write_str_in_full(helper->in, "list for-push\n");
	else
		write_str_in_full(helper->in, "list\n");

	while (1) {
		char *eov, *eon;
		if (recvline(data, &buf))
			exit(128);

		if (!*buf.buf)
			break;
		else if (buf.buf[0] == ':') {
			const char *value;
			if (skip_prefix(buf.buf, ":object-format ", &value)) {
				int algo = hash_algo_by_name(value);
				if (algo == GIT_HASH_UNKNOWN)
					die(_("unsupported object format '%s'"),
					    value);
				transport->hash_algo = &hash_algos[algo];
			}
			continue;
		}

		eov = strchr(buf.buf, ' ');
		if (!eov)
			die(_("malformed response in ref list: %s"), buf.buf);
		eon = strchr(eov + 1, ' ');
		*eov = '\0';
		if (eon)
			*eon = '\0';
		*tail = alloc_ref(eov + 1);
		if (buf.buf[0] == '@')
			(*tail)->symref = xstrdup(buf.buf + 1);
		else if (buf.buf[0] != '?')
			get_oid_hex_algop(buf.buf, &(*tail)->old_oid, transport->hash_algo);
		if (eon) {
			if (has_attribute(eon + 1, "unchanged")) {
				(*tail)->status |= REF_STATUS_UPTODATE;
				if (read_ref((*tail)->name, &(*tail)->old_oid) < 0)
					die(_("could not read ref %s"),
					    (*tail)->name);
			}
		}
		tail = &((*tail)->next);
	}
	if (debug)
		fprintf(stderr, "Debug: Read ref listing.\n");
	strbuf_release(&buf);

	for (posn = ret; posn; posn = posn->next)
		resolve_remote_symref(posn, ret);

	return ret;
}

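/*
 * bundle-uri is only reachable through a taken-over connection; a plain
 * dumb helper cannot serve it, so report failure in that case.
 */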
static int get_bundle_uri(struct transport *transport)
{
	get_helper(transport);

	if (process_connect(transport, 0))
		return transport->vtable->get_bundle_uri(transport);

	return -1;
}

static struct transport_vtable vtable = {
	.set_option = set_helper_option,
	.get_refs_list = get_refs_list,
	.get_bundle_uri = get_bundle_uri,
	.fetch_refs = fetch_refs,
	.push_refs = push_refs,
	.connect = connect_helper,
	.disconnect = release_helper
};

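/*
 * Set up a transport backed by the remote helper with the given name;
 * the helper process itself is started lazily by get_helper().
 */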
int transport_helper_init(struct transport *transport, const char *name)
{
	struct helper_data *data = xcalloc(1, sizeof(*data));
	data->name = name;

	transport_check_allowed(name);

	if (getenv("GIT_TRANSPORT_HELPER_DEBUG"))
		debug = 1;

	list_objects_filter_init(&data->transport_options.filter_options);

	transport->data = data;
	transport->vtable = &vtable;
	transport->smart_options = &(data->transport_options);
	return 0;
}

/*
|
|
|
|
* Linux pipes can buffer 65536 bytes at once (and most platforms can
|
|
|
|
* buffer less), so attempt reads and writes with up to that size.
|
|
|
|
*/
|
|
|
|
#define BUFFERSIZE 65536
|
|
|
|
/* This should be enough to hold debugging message. */
|
|
|
|
#define PBUFFERSIZE 8192
|
|
|
|
|
|
|
|

/* Print bidirectional transfer loop debug message. */
__attribute__((format (printf, 1, 2)))
static void transfer_debug(const char *fmt, ...)
{
	/*
	 * NEEDSWORK: This function is sometimes used from multiple threads, and
	 * we end up using debug_enabled racily. That "should not matter" since
	 * we always write the same value, but it's still wrong. This function
	 * is listed in .tsan-suppressions for the time being.
	 */
	va_list args;
	char msgbuf[PBUFFERSIZE];
	static int debug_enabled = -1;

	if (debug_enabled < 0)
		debug_enabled = getenv("GIT_TRANSLOOP_DEBUG") ? 1 : 0;
	if (!debug_enabled)
		return;

	va_start(args, fmt);
	vsnprintf(msgbuf, PBUFFERSIZE, fmt, args);
	va_end(args);
	fprintf(stderr, "Transfer loop debugging: %s\n", msgbuf);
}
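
The NEEDSWORK above concerns the racy lazy initialization of
debug_enabled. One conventional way to resolve it, sketched here under
the assumption that pthreads are available (this is not what
transfer_debug() currently does), is to funnel the getenv() check
through pthread_once():

/*
 * Sketch only: a race-free variant of the lazy GIT_TRANSLOOP_DEBUG
 * check, using pthread_once() so the environment is consulted exactly
 * once no matter how many threads call in.
 */
#include <pthread.h>
#include <stdlib.h>

static int transloop_debug_enabled;
static pthread_once_t transloop_debug_once = PTHREAD_ONCE_INIT;

static void transloop_debug_init(void)
{
	transloop_debug_enabled = getenv("GIT_TRANSLOOP_DEBUG") ? 1 : 0;
}

static int transfer_debug_wanted(void)
{
	pthread_once(&transloop_debug_once, transloop_debug_init);
	return transloop_debug_enabled;
}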

/* Stream state: More data may be coming in this direction. */
#define SSTATE_TRANSFERRING 0
/*
 * Stream state: No more data coming in this direction, flushing rest of
 * data.
 */
#define SSTATE_FLUSHING 1
/* Stream state: Transfer in this direction finished. */
#define SSTATE_FINISHED 2

#define STATE_NEEDS_READING(state) ((state) <= SSTATE_TRANSFERRING)
#define STATE_NEEDS_WRITING(state) ((state) <= SSTATE_FLUSHING)
#define STATE_NEEDS_CLOSING(state) ((state) == SSTATE_FLUSHING)
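
Each direction moves through these states monotonically: TRANSFERRING
until the reader sees EOF (udt_do_read() below switches to FLUSHING),
and FLUSHING until the buffer drains and udt_close_if_finished() marks
it FINISHED. The following self-check, which assumes only the macros
defined above, spells out what each predicate allows:

/*
 * Illustrative self-check of the state/predicate encoding above.
 */
#include <assert.h>

static void check_sstate_predicates(void)
{
	/* Still reading and writing while data may arrive. */
	assert(STATE_NEEDS_READING(SSTATE_TRANSFERRING));
	assert(STATE_NEEDS_WRITING(SSTATE_TRANSFERRING));
	assert(!STATE_NEEDS_CLOSING(SSTATE_TRANSFERRING));

	/* After EOF: no more reads, but keep draining, then close. */
	assert(!STATE_NEEDS_READING(SSTATE_FLUSHING));
	assert(STATE_NEEDS_WRITING(SSTATE_FLUSHING));
	assert(STATE_NEEDS_CLOSING(SSTATE_FLUSHING));

	/* Fully done: nothing left to do in this direction. */
	assert(!STATE_NEEDS_READING(SSTATE_FINISHED));
	assert(!STATE_NEEDS_WRITING(SSTATE_FINISHED));
	assert(!STATE_NEEDS_CLOSING(SSTATE_FINISHED));
}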

/* Unidirectional transfer. */
struct unidirectional_transfer {
	/* Source */
	int src;
	/* Destination */
	int dest;
	/* Is source socket? */
	int src_is_sock;
	/* Is destination socket? */
	int dest_is_sock;
	/* Transfer state (TRANSFERRING/FLUSHING/FINISHED) */
	int state;
	/* Buffer. */
	char buf[BUFFERSIZE];
	/* Buffer used. */
	size_t bufuse;
	/* Name of source. */
	const char *src_name;
	/* Name of destination. */
	const char *dest_name;
};

/* Closes the target (for writing) if transfer has finished. */
static void udt_close_if_finished(struct unidirectional_transfer *t)
{
	if (STATE_NEEDS_CLOSING(t->state) && !t->bufuse) {
		t->state = SSTATE_FINISHED;
		if (t->dest_is_sock)
			shutdown(t->dest, SHUT_WR);
		else
			close(t->dest);
		transfer_debug("Closed %s.", t->dest_name);
	}
}

/*
 * Tries to read data from source into buffer. If buffer is full,
 * no data is read. Returns 0 on success, -1 on error.
 */
static int udt_do_read(struct unidirectional_transfer *t)
{
	ssize_t bytes;

	if (t->bufuse == BUFFERSIZE)
		return 0;	/* No space for more. */

	transfer_debug("%s is readable", t->src_name);
	bytes = xread(t->src, t->buf + t->bufuse, BUFFERSIZE - t->bufuse);
	if (bytes < 0) {
		error_errno(_("read(%s) failed"), t->src_name);
		return -1;
	} else if (bytes == 0) {
		transfer_debug("%s EOF (with %i bytes in buffer)",
			t->src_name, (int)t->bufuse);
		t->state = SSTATE_FLUSHING;
	} else if (bytes > 0) {
		t->bufuse += bytes;
		transfer_debug("Read %i bytes from %s (buffer now at %i)",
			(int)bytes, t->src_name, (int)t->bufuse);
	}
	return 0;
}

/* Tries to write data from buffer into destination. If buffer is empty,
 * no data is written. Returns 0 on success, -1 on error.
 */
static int udt_do_write(struct unidirectional_transfer *t)
{
	ssize_t bytes;

	if (t->bufuse == 0)
		return 0;	/* Nothing to write. */

	transfer_debug("%s is writable", t->dest_name);
	bytes = xwrite(t->dest, t->buf, t->bufuse);
	if (bytes < 0) {
		error_errno(_("write(%s) failed"), t->dest_name);
		return -1;
	} else if (bytes > 0) {
		t->bufuse -= bytes;
		if (t->bufuse)
			memmove(t->buf, t->buf + bytes, t->bufuse);
		transfer_debug("Wrote %i bytes to %s (buffer now at %i)",
			(int)bytes, t->dest_name, (int)t->bufuse);
	}
	return 0;
}

/* State of bidirectional transfer loop. */
struct bidirectional_transfer_state {
	/* Direction from program to git. */
	struct unidirectional_transfer ptg;
	/* Direction from git to program. */
	struct unidirectional_transfer gtp;
};

static void *udt_copy_task_routine(void *udt)
{
	struct unidirectional_transfer *t = (struct unidirectional_transfer *)udt;
	while (t->state != SSTATE_FINISHED) {
		if (STATE_NEEDS_READING(t->state))
			if (udt_do_read(t))
				return NULL;
		if (STATE_NEEDS_WRITING(t->state))
			if (udt_do_write(t))
				return NULL;
		if (STATE_NEEDS_CLOSING(t->state))
			udt_close_if_finished(t);
	}
	return udt;	/* Just some non-NULL value. */
}

#ifndef NO_PTHREADS

/*
 * Join thread, with appropriate errors on failure. Name is name for the
 * thread (for error messages). Returns 0 on success, 1 on failure.
 */
static int tloop_join(pthread_t thread, const char *name)
{
	int err;
	void *tret;
	err = pthread_join(thread, &tret);
	if (!tret) {
		error(_("%s thread failed"), name);
		return 1;
	}
	if (err) {
		error(_("%s thread failed to join: %s"), name, strerror(err));
		return 1;
	}
	return 0;
}

/*
 * Spawn the transfer tasks and then wait for them. Returns 0 on success,
 * -1 on failure.
 */
static int tloop_spawnwait_tasks(struct bidirectional_transfer_state *s)
{
	pthread_t gtp_thread;
	pthread_t ptg_thread;
	int err;
	int ret = 0;
	err = pthread_create(&gtp_thread, NULL, udt_copy_task_routine,
		&s->gtp);
	if (err)
		die(_("can't start thread for copying data: %s"), strerror(err));
	err = pthread_create(&ptg_thread, NULL, udt_copy_task_routine,
		&s->ptg);
	if (err)
		die(_("can't start thread for copying data: %s"), strerror(err));

	ret |= tloop_join(gtp_thread, "Git to program copy");
	ret |= tloop_join(ptg_thread, "Program to git copy");
	return ret;
}
#else

/* Close the source and target (for writing) for transfer. */
static void udt_kill_transfer(struct unidirectional_transfer *t)
{
	t->state = SSTATE_FINISHED;
	/*
	 * Socket read end left open isn't a disaster if nobody
	 * attempts to read from it (mingw compat headers do not
	 * have SHUT_RD)...
	 *
	 * We can't fully close the socket since otherwise gtp
	 * task would first close the socket it sends data to
	 * while closing the ptg file descriptors.
	 */
	if (!t->src_is_sock)
		close(t->src);
	if (t->dest_is_sock)
		shutdown(t->dest, SHUT_WR);
	else
		close(t->dest);
}

/*
 * Join process, with appropriate errors on failure. Name is name for the
 * process (for error messages). Returns 0 on success, 1 on failure.
 */
static int tloop_join(pid_t pid, const char *name)
{
	int tret;
	if (waitpid(pid, &tret, 0) < 0) {
		error_errno(_("%s process failed to wait"), name);
		return 1;
	}
	if (!WIFEXITED(tret) || WEXITSTATUS(tret)) {
		error(_("%s process failed"), name);
		return 1;
	}
	return 0;
}

/*
 * Spawn the transfer tasks and then wait for them. Returns 0 on success,
 * -1 on failure.
 */
static int tloop_spawnwait_tasks(struct bidirectional_transfer_state *s)
{
	pid_t pid1, pid2;
	int ret = 0;

	/* Fork thread #1: git to program. */
	pid1 = fork();
	if (pid1 < 0)
		die_errno(_("can't start thread for copying data"));
	else if (pid1 == 0) {
		udt_kill_transfer(&s->ptg);
		exit(udt_copy_task_routine(&s->gtp) ? 0 : 1);
	}

	/* Fork thread #2: program to git. */
	pid2 = fork();
	if (pid2 < 0)
		die_errno(_("can't start thread for copying data"));
	else if (pid2 == 0) {
		udt_kill_transfer(&s->gtp);
		exit(udt_copy_task_routine(&s->ptg) ? 0 : 1);
	}

	/*
	 * Close both streams in parent as to not interfere with
	 * end of file detection and wait for both tasks to finish.
	 */
	udt_kill_transfer(&s->gtp);
	udt_kill_transfer(&s->ptg);
	ret |= tloop_join(pid1, "Git to program copy");
	ret |= tloop_join(pid2, "Program to git copy");
	return ret;
}
#endif

/*
 * Copies data from stdin to output and from input to stdout simultaneously.
 * Additionally filtering through given filter. If filter is NULL, uses
 * identity filter.
 */
int bidirectional_transfer_loop(int input, int output)
{
	struct bidirectional_transfer_state state;

	/* Fill the state fields. */
	state.ptg.src = input;
	state.ptg.dest = 1;
	state.ptg.src_is_sock = (input == output);
	state.ptg.dest_is_sock = 0;
	state.ptg.state = SSTATE_TRANSFERRING;
	state.ptg.bufuse = 0;
	state.ptg.src_name = "remote input";
	state.ptg.dest_name = "stdout";

	state.gtp.src = 0;
	state.gtp.dest = output;
	state.gtp.src_is_sock = 0;
	state.gtp.dest_is_sock = (input == output);
	state.gtp.state = SSTATE_TRANSFERRING;
	state.gtp.bufuse = 0;
	state.gtp.src_name = "stdin";
	state.gtp.dest_name = "remote output";

	return tloop_spawnwait_tasks(&state);
}
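
A caller that already holds an established bidirectional channel (as
helpers like git-remote-fd do) hands its descriptors to this loop and
lets it splice git's stdio onto them. A hedged sketch of such a caller,
assuming `sock` is a connected socket obtained elsewhere;
splice_stdio_to_socket() is illustrative, not a real git function:

/*
 * Illustrative caller only: splice our stdin/stdout onto a connected
 * bidirectional socket. Passing the same descriptor for both input and
 * output is what makes the loop treat the endpoints as sockets (see the
 * src_is_sock/dest_is_sock assignments above).
 */
static int splice_stdio_to_socket(int sock)
{
	return bidirectional_transfer_loop(sock, sock);
}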

void reject_atomic_push(struct ref *remote_refs, int mirror_mode)
{
	struct ref *ref;

	/* Mark other refs as failed */
	for (ref = remote_refs; ref; ref = ref->next) {
		if (!ref->peer_ref && !mirror_mode)
			continue;

		switch (ref->status) {
		case REF_STATUS_NONE:
		case REF_STATUS_OK:
		case REF_STATUS_EXPECTING_REPORT:
			ref->status = REF_STATUS_ATOMIC_PUSH_FAILED;
			continue;
		default:
			break; /* do nothing */
		}
	}
	return;
}
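
reject_atomic_push() lets a push backend honouring --atomic turn one
rejected update into a failure of the whole set: every ref still in
NONE/OK/EXPECTING_REPORT is downgraded to REF_STATUS_ATOMIC_PUSH_FAILED.
A hedged sketch of a caller; the wrapper name and its signature are
illustrative, not git's actual push path:

/*
 * Illustrative only: when one ref cannot be updated and the push is
 * atomic, report the offender and fail every remaining ref so the user
 * can see that nothing was updated on the remote.
 */
static int maybe_reject_atomic_push(struct ref *remote_refs, struct ref *failed,
				    int atomic, int mirror)
{
	if (!atomic)
		return 0;
	error(_("atomic push failed for ref %s; aborting"), failed->name);
	reject_atomic_push(remote_refs, mirror);
	return -1;
}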