#!/bin/sh
# Exercise the smart HTTP transport served by git-http-backend:
# clone/fetch over http, redirects, auth, namespaces, cookies, and
# large-request handling.

test_description='test smart fetching over http via http-backend'
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-httpd.sh
start_httpd
test_expect_success 'setup repository' '
	git config push.default matching &&
	echo content >file &&
	git add file &&
	git commit -m one
'

test_expect_success 'create http-accessible bare repository' '
	mkdir "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
	(cd "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
	 git --bare init
	) &&
	git remote add public "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
	git push public master:master
'

setup_askpass_helper
test_expect_success 'clone http repository' '
	cat >exp <<-\EOF &&
	> GET /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1
	> Accept: */*
	> Accept-Encoding: ENCODINGS
	> Pragma: no-cache
	< HTTP/1.1 200 OK
	< Pragma: no-cache
	< Cache-Control: no-cache, max-age=0, must-revalidate
	< Content-Type: application/x-git-upload-pack-advertisement
	> POST /smart/repo.git/git-upload-pack HTTP/1.1
	> Accept-Encoding: ENCODINGS
	> Content-Type: application/x-git-upload-pack-request
	> Accept: application/x-git-upload-pack-result
	> Content-Length: xxx
	< HTTP/1.1 200 OK
	< Pragma: no-cache
	< Cache-Control: no-cache, max-age=0, must-revalidate
	< Content-Type: application/x-git-upload-pack-result
	EOF
	GIT_TRACE_CURL=true GIT_TEST_PROTOCOL_VERSION=0 \
		git clone --quiet $HTTPD_URL/smart/repo.git clone 2>err &&
	test_cmp file clone/file &&
	tr '\''\015'\'' Q <err |
	sed -e "
		s/Q\$//
		/^[*] /d
		/^== Info:/d
		/^=> Send header, /d
		/^=> Send header:$/d
		/^<= Recv header, /d
		/^<= Recv header:$/d
		s/=> Send header: //
		s/= Recv header://
		/^<= Recv data/d
		/^=> Send data/d
		/^$/d
		/^< $/d

		/^[^><]/{
			s/^/> /
		}

		/^> User-Agent: /d
		/^> Host: /d
		/^> POST /,$ {
			/^> Accept: [*]\\/[*]/d
		}
		s/^> Content-Length: .*/> Content-Length: xxx/
		/^> 00..want /d
		/^> 00.*done/d
		/^< Server: /d
		/^< Expires: /d
		/^< Date: /d
		/^< Content-Length: /d
		/^< Transfer-Encoding: /d
	" >actual &&

	# NEEDSWORK: If the overspecification of the expected result is reduced, we
	# might be able to run this test in all protocol versions.
	if test "$GIT_TEST_PROTOCOL_VERSION" = 0
	then
		sed -e "s/^> Accept-Encoding: .*/> Accept-Encoding: ENCODINGS/" \
			actual >actual.smudged &&
		test_cmp exp actual.smudged &&

		grep "Accept-Encoding:.*gzip" actual >actual.gzip &&
		test_line_count = 2 actual.gzip
	fi
'
test_expect_success 'fetch changes via http' '
	echo content >>file &&
	git commit -a -m two &&
	git push public &&
	(cd clone && git pull) &&
	test_cmp file clone/file
'

test_expect_success 'used upload-pack service' '
	cat >exp <<-\EOF &&
	GET  /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
	POST /smart/repo.git/git-upload-pack HTTP/1.1 200
	GET  /smart/repo.git/info/refs?service=git-upload-pack HTTP/1.1 200
	POST /smart/repo.git/git-upload-pack HTTP/1.1 200
	EOF

	# NEEDSWORK: If the overspecification of the expected result is reduced, we
	# might be able to run this test in all protocol versions.
	if test "$GIT_TEST_PROTOCOL_VERSION" = 0
	then
		check_access_log exp
	fi
'
test_expect_success 'follow redirects (301)' '
	git clone $HTTPD_URL/smart-redir-perm/repo.git --quiet repo-p
'

test_expect_success 'follow redirects (302)' '
	git clone $HTTPD_URL/smart-redir-temp/repo.git --quiet repo-t
'

# /smart-redir-limited redirects only the initial info/refs request, so
# this clone succeeds only if the client re-roots subsequent requests at
# the redirected base URL.
test_expect_success 'redirects re-root further requests' '
	git clone $HTTPD_URL/smart-redir-limited/repo.git repo-redir-limited
'

test_expect_success 're-rooting dies on insane schemes' '
	test_must_fail git clone $HTTPD_URL/insane-redir/repo.git insane
'
test_expect_success 'clone from password-protected repository' '
	echo two >expect &&
	set_askpass user@host pass@host &&
	git clone --bare "$HTTPD_URL/auth/smart/repo.git" smart-auth &&
	expect_askpass both user@host &&
	git --git-dir=smart-auth log -1 --format=%s >actual &&
	test_cmp expect actual
'

test_expect_success 'clone from auth-only-for-push repository' '
	echo two >expect &&
	set_askpass wrong &&
	git clone --bare "$HTTPD_URL/auth-push/smart/repo.git" smart-noauth &&
	expect_askpass none &&
	git --git-dir=smart-noauth log -1 --format=%s >actual &&
	test_cmp expect actual
'
# "half-auth": refs are advertised without auth, but fetching objects
# requires a password.
test_expect_success 'clone from auth-only-for-objects repository' '
	echo two >expect &&
	set_askpass user@host pass@host &&
	git clone --bare "$HTTPD_URL/auth-fetch/smart/repo.git" half-auth &&
	expect_askpass both user@host &&
	git --git-dir=half-auth log -1 --format=%s >actual &&
	test_cmp expect actual
'

test_expect_success 'no-op half-auth fetch does not require a password' '
	set_askpass wrong &&

	# NEEDSWORK: When using HTTP(S), protocol v0 supports a "half-auth"
	# configuration with authentication required only when downloading
	# objects and not refs, by having the HTTP server only require
	# authentication for the "git-upload-pack" path and not "info/refs".
	# This is not possible with protocol v2, since both objects and refs
	# are obtained from the "git-upload-pack" path. A solution to this is
	# to teach the server and client to be able to inline ls-refs requests
	# as an Extra Parameter (see pack-protocol.txt), so that "info/refs"
	# can serve refs, just like it does in protocol v0.
	GIT_TEST_PROTOCOL_VERSION=0 git --git-dir=half-auth fetch &&
	expect_askpass none
'
# After a redirect, credentials must be prompted for (and sent to) the
# redirected location, not the original one; credential.useHttpPath lets
# us observe the difference since both URLs share a host.
test_expect_success 'redirects send auth to new location' '
	set_askpass user@host pass@host &&
	git -c credential.useHttpPath=true \
		clone $HTTPD_URL/smart-redir-auth/repo.git repo-redir-auth &&
	expect_askpass both user@host auth/smart/repo.git
'
test_expect_success 'GIT_TRACE_CURL redacts auth details' '
	rm -rf redact-auth trace &&
	set_askpass user@host pass@host &&
	GIT_TRACE_CURL="$(pwd)/trace" git clone --bare "$HTTPD_URL/auth/smart/repo.git" redact-auth &&
	expect_askpass both user@host &&

	# Ensure that there is no "Basic" followed by a base64 string, but that
	# the auth details are redacted
	! grep "Authorization: Basic [0-9a-zA-Z+/]" trace &&
	grep "Authorization: Basic <redacted>" trace
'

test_expect_success 'GIT_CURL_VERBOSE redacts auth details' '
	rm -rf redact-auth trace &&
	set_askpass user@host pass@host &&
	GIT_CURL_VERBOSE=1 git clone --bare "$HTTPD_URL/auth/smart/repo.git" redact-auth 2>trace &&
	expect_askpass both user@host &&

	# Ensure that there is no "Basic" followed by a base64 string, but that
	# the auth details are redacted
	! grep "Authorization: Basic [0-9a-zA-Z+/]" trace &&
	grep "Authorization: Basic <redacted>" trace
'

test_expect_success 'GIT_TRACE_CURL does not redact auth details if GIT_TRACE_REDACT=0' '
	rm -rf redact-auth trace &&
	set_askpass user@host pass@host &&
	GIT_TRACE_REDACT=0 GIT_TRACE_CURL="$(pwd)/trace" \
		git clone --bare "$HTTPD_URL/auth/smart/repo.git" redact-auth &&
	expect_askpass both user@host &&

	grep "Authorization: Basic [0-9a-zA-Z+/]" trace
'
test_expect_success 'disable dumb http on server' '
	git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" \
		config http.getanyfile false
'

test_expect_success 'GIT_SMART_HTTP can disable smart http' '
	(GIT_SMART_HTTP=0 &&
	 export GIT_SMART_HTTP &&
	 cd clone &&
	 test_must_fail git fetch)
'
test_expect_success 'invalid Content-Type rejected' '
	test_must_fail git clone $HTTPD_URL/broken_smart/repo.git 2>actual &&
	test_i18ngrep "not valid:" actual
'
test_expect_success 'create namespaced refs' '
	test_commit namespaced &&
	git push public HEAD:refs/namespaces/ns/refs/heads/master &&
	git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" \
		symbolic-ref refs/namespaces/ns/HEAD refs/namespaces/ns/refs/heads/master
'

test_expect_success 'smart clone respects namespace' '
	git clone "$HTTPD_URL/smart_namespace/repo.git" ns-smart &&
	echo namespaced >expect &&
	git --git-dir=ns-smart/.git log -1 --format=%s >actual &&
	test_cmp expect actual
'

test_expect_success 'dumb clone via http-backend respects namespace' '
	git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" \
		config http.getanyfile true &&
	GIT_SMART_HTTP=0 git clone \
		"$HTTPD_URL/smart_namespace/repo.git" ns-dumb &&
	echo namespaced >expect &&
	git --git-dir=ns-dumb/.git log -1 --format=%s >actual &&
	test_cmp expect actual
'
# NOTE(review): cookie-file fields are tab-separated in the Netscape cookie
# format curl writes; the separators below are reconstructed as tabs —
# confirm against the original file, since the source rendering lost them.
test_expect_success 'cookies stored in http.cookiefile when http.savecookies set' '
	cat >cookies.txt <<-\EOF &&
	127.0.0.1	FALSE	/smart_cookies/	FALSE	0	othername	othervalue
	EOF
	sort >expect_cookies.txt <<-\EOF &&

	127.0.0.1	FALSE	/smart_cookies/	FALSE	0	othername	othervalue
	127.0.0.1	FALSE	/smart_cookies/repo.git/info/	FALSE	0	name	value
	EOF
	git config http.cookiefile cookies.txt &&
	git config http.savecookies true &&
	git ls-remote $HTTPD_URL/smart_cookies/repo.git master &&

	# NEEDSWORK: If the overspecification of the expected result is reduced, we
	# might be able to run this test in all protocol versions.
	if test "$GIT_TEST_PROTOCOL_VERSION" = 0
	then
		tail -3 cookies.txt | sort >cookies_tail.txt &&
		test_cmp expect_cookies.txt cookies_tail.txt
	fi
'
test_expect_success 'transfer.hiderefs works over smart-http' '
	test_commit hidden &&
	test_commit visible &&
	git push public HEAD^:refs/heads/a HEAD:refs/heads/b &&
	git --git-dir="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" \
		config transfer.hiderefs refs/heads/a &&
	git clone --bare "$HTTPD_URL/smart/repo.git" hidden.git &&
	test_must_fail git -C hidden.git rev-parse --verify a &&
	git -C hidden.git rev-parse --verify b
'
# create an arbitrary number of tags, numbered from tag-$1 to tag-$2
create_tags () {
	rm -f marks &&
	for i in $(test_seq "$1" "$2")
	do
		# don't use here-doc, because it requires a process
		# per loop iteration
		echo "commit refs/heads/too-many-refs-$1" &&
		echo "mark :$i" &&
		echo "committer git <git@example.com> $i +0000" &&
		echo "data 0" &&
		echo "M 644 inline bla.txt" &&
		echo "data 4" &&
		echo "bla" &&
		# make every commit dangling by always
		# rewinding the branch after each commit
		echo "reset refs/heads/too-many-refs-$1" &&
		echo "from :$1"
	done | git fast-import --export-marks=marks &&

	# now assign tags to all the dangling commits we created above
	tag=$(perl -e "print \"bla\" x 30") &&
	sed -e "s|^:\([^ ]*\) \(.*\)$|\2 refs/tags/$tag-\1|" <marks >>packed-refs
}
test_expect_success 'create 2,000 tags in the repo' '
	(
		cd "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
		create_tags 1 2000
	)
'

test_expect_success CMDLINE_LIMIT \
	'clone the 2,000 tag repo to check OS command line overflow' '
	run_with_limited_cmdline git clone $HTTPD_URL/smart/repo.git too-many-refs &&
	(
		cd too-many-refs &&
		git for-each-ref refs/tags >actual &&
		test_line_count = 2000 actual
	)
'
# A small http.postbuffer forces the request body past the buffer limit,
# which makes remote-curl switch to chunked transfer encoding.
test_expect_success 'large fetch-pack requests can be sent using chunked encoding' '
	GIT_TRACE_CURL=true git -c http.postbuffer=65536 \
		clone --bare "$HTTPD_URL/smart/repo.git" split.git 2>err &&
	grep "^=> Send header: Transfer-Encoding: chunked" err
'
test_expect_success 'test allowreachablesha1inwant' '
	test_when_finished "rm -rf test_reachable.git" &&
	server="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
	master_sha=$(git -C "$server" rev-parse refs/heads/master) &&
	git -C "$server" config uploadpack.allowreachablesha1inwant 1 &&

	git init --bare test_reachable.git &&
	git -C test_reachable.git remote add origin "$HTTPD_URL/smart/repo.git" &&
	git -C test_reachable.git fetch origin "$master_sha"
'

test_expect_success 'test allowreachablesha1inwant with unreachable' '
	test_when_finished "rm -rf test_reachable.git; git reset --hard $(git rev-parse HEAD)" &&

	#create unreachable sha
	echo content >file2 &&
	git add file2 &&
	git commit -m two &&
	git push public HEAD:refs/heads/doomed &&
	git push public :refs/heads/doomed &&

	server="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
	master_sha=$(git -C "$server" rev-parse refs/heads/master) &&
	git -C "$server" config uploadpack.allowreachablesha1inwant 1 &&

	git init --bare test_reachable.git &&
	git -C test_reachable.git remote add origin "$HTTPD_URL/smart/repo.git" &&
	# Some protocol versions (e.g. 2) support fetching
	# unadvertised objects, so restrict this test to v0.
	test_must_fail env GIT_TEST_PROTOCOL_VERSION=0 \
		git -C test_reachable.git fetch origin "$(git rev-parse HEAD)"
'
remote-curl: don't hang when a server dies before any output
In the event that a HTTP server closes the connection after giving a
200 but before giving any packets, we don't want to hang forever
waiting for a response that will never come. Instead, we should die
immediately.
One case where this happens is when attempting to fetch a dangling
object by its object name. In this case, the server dies before
sending any data. Prior to this patch, fetch-pack would wait for
data from the server, and remote-curl would wait for fetch-pack,
causing a deadlock.
Despite this patch, there is other possible malformed input that could
cause the same deadlock (e.g. a half-finished pktline, or a pktline but
no trailing flush). There are a few possible solutions to this:
1. Allowing remote-curl to tell fetch-pack about the EOF (so that
fetch-pack could know that no more data is coming until it says
something else). This is tricky because an out-of-band signal would
be required, or the http response would have to be re-framed inside
another layer of pkt-line or something.
2. Make remote-curl understand some of the protocol. It turns out
that in addition to understanding pkt-line, it would need to watch for
ack/nak. This is somewhat fragile, as information about the protocol
would end up in two places. Also, pkt-lines which are already at the
length limit would need special handling.
Both of these solutions would require a fair amount of work, whereas
this hack is easy and solves at least some of the problem.
Still to do: it would be good to give a better error message
than "fatal: The remote end hung up unexpectedly".
Signed-off-by: David Turner <dturner@twosigma.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-11-18 20:30:49 +00:00
|
|
|
'
|
|
|
|
|
2016-11-11 17:23:48 +00:00
|
|
|
# uploadpack.allowAnySHA1InWant must permit fetching even unreachable
# objects.  First confirm the fetch fails under allowReachableSHA1InWant
# alone, then flip on allowAnySHA1InWant and confirm it succeeds.
test_expect_success 'test allowanysha1inwant with unreachable' '
	test_when_finished "rm -rf test_reachable.git; git reset --hard $(git rev-parse HEAD)" &&

	#create unreachable sha
	echo content >file2 &&
	git add file2 &&
	git commit -m two &&
	git push public HEAD:refs/heads/doomed &&
	git push public :refs/heads/doomed &&

	server="$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
	master_sha=$(git -C "$server" rev-parse refs/heads/master) &&
	git -C "$server" config uploadpack.allowreachablesha1inwant 1 &&

	git init --bare test_reachable.git &&
	git -C test_reachable.git remote add origin "$HTTPD_URL/smart/repo.git" &&
	# Some protocol versions (e.g. 2) support fetching
	# unadvertised objects, so restrict this test to v0.
	test_must_fail env GIT_TEST_PROTOCOL_VERSION=0 \
		git -C test_reachable.git fetch origin "$(git rev-parse HEAD)" &&

	# now the same fetch must succeed with allowAnySHA1InWant
	git -C "$server" config uploadpack.allowanysha1inwant 1 &&
	git -C test_reachable.git fetch origin "$(git rev-parse HEAD)"
'
|
|
|
|
|
http-backend: spool ref negotiation requests to buffer
When http-backend spawns "upload-pack" to do ref
negotiation, it streams the http request body to
upload-pack, who then streams the http response back to the
client as it reads. In theory, git can go full-duplex; the
client can consume our response while it is still sending
the request. In practice, however, HTTP is a half-duplex
protocol. Even if our client is ready to read and write
simultaneously, we may have other HTTP infrastructure in the
way, including the webserver that spawns our CGI, or any
intermediate proxies.
In at least one documented case[1], this leads to deadlock
when trying a fetch over http. What happens is basically:
1. Apache proxies the request to the CGI, http-backend.
2. http-backend gzip-inflates the data and sends
the result to upload-pack.
3. upload-pack acts on the data and generates output over
the pipe back to Apache. Apache isn't reading because
it's busy writing (step 1).
This works fine most of the time, because the upload-pack
output ends up in a system pipe buffer, and Apache reads
it as soon as it finishes writing. But if both the request
and the response exceed the system pipe buffer size, then we
deadlock (Apache blocks writing to http-backend,
http-backend blocks writing to upload-pack, and upload-pack
blocks writing to Apache).
We need to break the deadlock by spooling either the input
or the output. In this case, it's ideal to spool the input,
because Apache does not start reading either stdout _or_
stderr until we have consumed all of the input. So until we
do so, we cannot even get an error message out to the
client.
The solution is fairly straight-forward: we read the request
body into an in-memory buffer in http-backend, freeing up
Apache, and then feed the data ourselves to upload-pack. But
there are a few important things to note:
1. We limit the in-memory buffer to prevent an obvious
denial-of-service attack. This is a new hard limit on
requests, but it's unlikely to come into play. The
default value is 10MB, which covers even the ridiculous
100,000-ref negotation in the included test (that
actually caps out just over 5MB). But it's configurable
on the off chance that you don't mind spending some
extra memory to make even ridiculous requests work.
2. We must take care only to buffer when we have to. For
pushes, the incoming packfile may be of arbitrary
size, and we should connect the input directly to
receive-pack. There's no deadlock problem here, though,
because we do not produce any output until the whole
packfile has been read.
For upload-pack's initial ref advertisement, we
similarly do not need to buffer. Even though we may
generate a lot of output, there is no request body at
all (i.e., it is a GET, not a POST).
[1] http://article.gmane.org/gmane.comp.version-control.git/269020
Test-adapted-from: Dennis Kaarsemaker <dennis@kaarsemaker.net>
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-05-20 07:37:09 +00:00
|
|
|
# Stress ref negotiation over http with 100,000 tags, exercising the
# http-backend request-spooling path.  Relies on the create_tags helper
# and the too-many-refs clone set up earlier in this file — TODO confirm
# both exist above this chunk.
test_expect_success EXPENSIVE 'http can handle enormous ref negotiation' '
	(
		cd "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
		create_tags 2001 50000
	) &&
	git -C too-many-refs fetch -q --tags &&
	(
		cd "$HTTPD_DOCUMENT_ROOT_PATH/repo.git" &&
		create_tags 50001 100000
	) &&
	git -C too-many-refs fetch -q --tags &&
	git -C too-many-refs for-each-ref refs/tags >tags &&
	test_line_count = 100000 tags
'
|
|
|
|
|
2016-04-27 12:20:37 +00:00
|
|
|
# The smart_headers endpoint (configured in lib-httpd) rejects requests
# missing the expected extra headers.  Verify http.extraheader is sent by
# fetch and propagated through submodule operations.
test_expect_success 'custom http headers' '
	# only one of the two required headers -> server must refuse
	test_must_fail git -c http.extraheader="x-magic-two: cadabra" \
		fetch "$HTTPD_URL/smart_headers/repo.git" &&
	git -c http.extraheader="x-magic-one: abra" \
		-c http.extraheader="x-magic-two: cadabra" \
		fetch "$HTTPD_URL/smart_headers/repo.git" &&
	git update-index --add --cacheinfo 160000,$(git rev-parse HEAD),sub &&
	git config -f .gitmodules submodule.sub.path sub &&
	git config -f .gitmodules submodule.sub.url \
		"$HTTPD_URL/smart_headers/repo.git" &&
	git submodule init sub &&
	# without the headers the submodule clone must fail ...
	test_must_fail git submodule update sub &&
	# ... and succeed once both headers are supplied
	git -c http.extraheader="x-magic-one: abra" \
		-c http.extraheader="x-magic-two: cadabra" \
		submodule update sub
'
|
|
|
|
|
fetch-pack: unify ref in and out param
When a user fetches:
- at least one up-to-date ref and at least one non-up-to-date ref,
- using HTTP with protocol v0 (or something else that uses the fetch
command of a remote helper)
some refs might not be updated after the fetch.
This bug was introduced in commit 989b8c4452 ("fetch-pack: put shallow
info in output parameter", 2018-06-28) which allowed transports to
report the refs that they have fetched in a new out-parameter
"fetched_refs". If they do so, transport_fetch_refs() makes this
information available to its caller.
Users of "fetched_refs" rely on the following 3 properties:
(1) it is the complete list of refs that was passed to
transport_fetch_refs(),
(2) it has shallow information (REF_STATUS_REJECT_SHALLOW set if
relevant), and
(3) it has updated OIDs if ref-in-want was used (introduced after
989b8c4452).
In an effort to satisfy (1), whenever transport_fetch_refs()
filters the refs sent to the transport, it re-adds the filtered refs to
whatever the transport supplies before returning it to the user.
However, the implementation in 989b8c4452 unconditionally re-adds the
filtered refs without checking if the transport refrained from reporting
anything in "fetched_refs" (which it is allowed to do), resulting in an
incomplete list, no longer satisfying (1).
An earlier effort to resolve this [1] solved the issue by readding the
filtered refs only if the transport did not refrain from reporting in
"fetched_refs", but after further discussion, it seems that the better
solution is to revert the API change that introduced "fetched_refs".
This API change was first suggested as part of a ref-in-want
implementation that allowed for ref patterns and, thus, there could be
drastic differences between the input refs and the refs actually fetched
[2]; we eventually decided to only allow exact ref names, but this API
change remained even though its necessity was decreased.
Therefore, revert this API change by reverting commit 989b8c4452, and
make receive_wanted_refs() update the OIDs in the sought array (like how
update_shallow() updates shallow information in the sought array)
instead. A test is also included to show that the user-visible bug
discussed at the beginning of this commit message no longer exists.
[1] https://public-inbox.org/git/20180801171806.GA122458@google.com/
[2] https://public-inbox.org/git/86a128c5fb710a41791e7183207c4d64889f9307.1485381677.git.jonathantanmy@google.com/
Signed-off-by: Jonathan Tan <jonathantanmy@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-08-01 20:13:20 +00:00
|
|
|
# Regression test: when protocol v0 goes through remote-curl's "fetch"
# command and one ref is already up to date (anotherbranch), the other
# ref (master) must still be updated after the fetch.
test_expect_success 'using fetch command in remote-curl updates refs' '
	SERVER="$HTTPD_DOCUMENT_ROOT_PATH/twobranch" &&
	rm -rf "$SERVER" client &&

	git init "$SERVER" &&
	test_commit -C "$SERVER" foo &&
	git -C "$SERVER" update-ref refs/heads/anotherbranch foo &&

	git clone $HTTPD_URL/smart/twobranch client &&

	test_commit -C "$SERVER" bar &&
	git -C client -c protocol.version=0 fetch &&

	git -C "$SERVER" rev-parse master >expect &&
	git -C client rev-parse origin/master >actual &&
	test_cmp expect actual
'
|
|
|
|
|
2018-09-27 19:24:07 +00:00
|
|
|
# Fetching an unadvertised commit by raw SHA-1 with --no-tags over
# protocol v0 must succeed (no tag-following negotiation kicks in).
test_expect_success 'fetch by SHA-1 without tag following' '
	SERVER="$HTTPD_DOCUMENT_ROOT_PATH/server" &&
	rm -rf "$SERVER" client &&

	git init "$SERVER" &&
	test_commit -C "$SERVER" foo &&

	git clone $HTTPD_URL/smart/server client &&

	test_commit -C "$SERVER" bar &&
	git -C "$SERVER" rev-parse bar >bar_hash &&
	git -C client -c protocol.version=0 fetch \
		--no-tags origin $(cat bar_hash)
'
|
|
|
|
|
2020-06-05 21:21:36 +00:00
|
|
|
# GIT_TRACE_CURL output must not leak cookie values: each value is
# replaced with <redacted> in the trace.
test_expect_success 'cookies are redacted by default' '
	rm -rf clone &&
	echo "Set-Cookie: Foo=1" >cookies &&
	echo "Set-Cookie: Bar=2" >>cookies &&
	GIT_TRACE_CURL=true \
		git -c "http.cookieFile=$(pwd)/cookies" clone \
		$HTTPD_URL/smart/repo.git clone 2>err &&
	grep "Cookie:.*Foo=<redacted>" err &&
	grep "Cookie:.*Bar=<redacted>" err &&
	! grep "Cookie:.*Foo=1" err &&
	! grep "Cookie:.*Bar=2" err
'
|
|
|
|
|
2020-06-05 21:21:36 +00:00
|
|
|
# A cookie with an empty value must also appear as <redacted> in the
# GIT_TRACE_CURL output, not as a bare "Foo=".
test_expect_success 'empty values of cookies are also redacted' '
	rm -rf clone &&
	echo "Set-Cookie: Foo=" >cookies &&
	GIT_TRACE_CURL=true \
		git -c "http.cookieFile=$(pwd)/cookies" clone \
		$HTTPD_URL/smart/repo.git clone 2>err &&
	grep "Cookie:.*Foo=<redacted>" err
'
|
|
|
|
|
2020-06-05 21:21:36 +00:00
|
|
|
# Setting GIT_TRACE_REDACT=0 opts out of redaction: the raw cookie
# values must appear verbatim in the curl trace.
test_expect_success 'GIT_TRACE_REDACT=0 disables cookie redaction' '
	rm -rf clone &&
	echo "Set-Cookie: Foo=1" >cookies &&
	echo "Set-Cookie: Bar=2" >>cookies &&
	GIT_TRACE_REDACT=0 GIT_TRACE_CURL=true \
		git -c "http.cookieFile=$(pwd)/cookies" clone \
		$HTTPD_URL/smart/repo.git clone 2>err &&
	grep "Cookie:.*Foo=1" err &&
	grep "Cookie:.*Bar=2" err
'
|
|
|
|
|
2018-01-19 00:28:02 +00:00
|
|
|
# GIT_TRACE_CURL normally dumps request/response bodies ("=> Send data");
# GIT_TRACE_CURL_NO_DATA=1 must suppress exactly that part of the trace.
test_expect_success 'GIT_TRACE_CURL_NO_DATA prevents data from being traced' '
	rm -rf clone &&
	GIT_TRACE_CURL=true \
		git clone $HTTPD_URL/smart/repo.git clone 2>err &&
	grep "=> Send data" err &&

	rm -rf clone &&
	GIT_TRACE_CURL=true GIT_TRACE_CURL_NO_DATA=1 \
		git clone $HTTPD_URL/smart/repo.git clone 2>err &&
	! grep "=> Send data" err
'
|
|
|
|
|
2019-02-06 19:19:10 +00:00
|
|
|
# The error_smart endpoint (set up in lib-httpd) returns a server-side
# error; the client must fail and surface that message to the user.
test_expect_success 'server-side error detected' '
	test_must_fail git clone $HTTPD_URL/error_smart/repo.git 2>actual &&
	test_i18ngrep "server-side error" actual
'
|
|
|
|
|
2009-10-31 00:47:47 +00:00
|
|
|
# Finish the test suite: report results and shut down the test httpd.
test_done