Merge branch 'master' of github.com:git/git

* 'master' of github.com:git/git: (51 commits)
  Hopefully the last batch of fixes before 2.44 final
  Git 2.43.2
  A few more fixes before -rc1
  write-or-die: fix the polarity of GIT_FLUSH environment variable
  A few more topics before -rc1
  completion: add and use __git_compute_second_level_config_vars_for_section
  completion: add and use __git_compute_first_level_config_vars_for_section
  completion: complete 'submodule.*' config variables
  completion: add space after config variable names also in Bash 3
  receive-pack: use find_commit_header() in check_nonce()
  ci(linux32): add a note about Actions that must not be updated
  ci: bump remaining outdated Actions versions
  unit-tests: do show relative file paths on non-Windows, too
  receive-pack: use find_commit_header() in check_cert_push_options()
  prune: mark rebase autostash and orig-head as reachable
  sequencer: unset GIT_CHERRY_PICK_HELP for 'exec' commands
  ref-filter.c: sort formatted dates by byte value
  ssh signing: signal an error with a negative return value
  bisect: document command line arguments for "bisect start"
  bisect: document "terms" subcommand more fully
  ...
Jiang Xin 2024-02-15 09:48:25 +08:00
commit f98643fcb2
62 changed files with 1122 additions and 542 deletions

View file

@ -4,4 +4,7 @@ a mailing list (git@vger.kernel.org) for code submissions, code reviews, and
bug reports. Nevertheless, you can use GitGitGadget (https://gitgitgadget.github.io/)
to conveniently send your Pull Request's commits to our mailing list.
For a single-commit pull request, please *leave the pull request description
empty*: your commit message itself should describe your changes.
Please read the "guidelines for contributing" linked above!

View file

@ -19,7 +19,7 @@ jobs:
check-whitespace:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
fetch-depth: 0

View file

@ -38,7 +38,7 @@ jobs:
COVERITY_LANGUAGE: cxx
COVERITY_PLATFORM: overridden-below
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: install minimal Git for Windows SDK
if: contains(matrix.os, 'windows')
uses: git-for-windows/setup-git-for-windows-sdk@v1
@ -98,7 +98,7 @@ jobs:
# A cache miss will add ~30s to create, but a cache hit will save minutes.
- name: restore the Coverity Build Tool
id: cache
uses: actions/cache/restore@v3
uses: actions/cache/restore@v4
with:
path: ${{ runner.temp }}/cov-analysis
key: cov-build-${{ env.COVERITY_LANGUAGE }}-${{ env.COVERITY_PLATFORM }}-${{ steps.lookup.outputs.hash }}
@ -141,7 +141,7 @@ jobs:
esac
- name: cache the Coverity Build Tool
if: steps.cache.outputs.cache-hit != 'true'
uses: actions/cache/save@v3
uses: actions/cache/save@v4
with:
path: ${{ runner.temp }}/cov-analysis
key: cov-build-${{ env.COVERITY_LANGUAGE }}-${{ env.COVERITY_PLATFORM }}-${{ steps.lookup.outputs.hash }}

View file

@ -63,7 +63,7 @@ jobs:
echo "skip_concurrent=$skip_concurrent" >>$GITHUB_OUTPUT
- name: skip if the commit or tree was already tested
id: skip-if-redundant
uses: actions/github-script@v6
uses: actions/github-script@v7
if: steps.check-ref.outputs.enabled == 'yes'
with:
github-token: ${{secrets.GITHUB_TOKEN}}
@ -112,7 +112,7 @@ jobs:
group: windows-build-${{ github.ref }}
cancel-in-progress: ${{ needs.ci-config.outputs.skip_concurrent == 'yes' }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: git-for-windows/setup-git-for-windows-sdk@v1
- name: build
shell: bash
@ -123,7 +123,7 @@ jobs:
- name: zip up tracked files
run: git archive -o artifacts/tracked.tar.gz HEAD
- name: upload tracked files and build artifacts
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: windows-artifacts
path: artifacts
@ -140,7 +140,7 @@ jobs:
cancel-in-progress: ${{ needs.ci-config.outputs.skip_concurrent == 'yes' }}
steps:
- name: download tracked files and build artifacts
uses: actions/download-artifact@v3
uses: actions/download-artifact@v4
with:
name: windows-artifacts
path: ${{github.workspace}}
@ -157,7 +157,7 @@ jobs:
run: ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: failed-tests-windows
path: ${{env.FAILED_TEST_ARTIFACTS}}
@ -173,10 +173,10 @@ jobs:
group: vs-build-${{ github.ref }}
cancel-in-progress: ${{ needs.ci-config.outputs.skip_concurrent == 'yes' }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: git-for-windows/setup-git-for-windows-sdk@v1
- name: initialize vcpkg
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
repository: 'microsoft/vcpkg'
path: 'compat/vcbuild/vcpkg'
@ -212,7 +212,7 @@ jobs:
- name: zip up tracked files
run: git archive -o artifacts/tracked.tar.gz HEAD
- name: upload tracked files and build artifacts
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: vs-artifacts
path: artifacts
@ -230,7 +230,7 @@ jobs:
steps:
- uses: git-for-windows/setup-git-for-windows-sdk@v1
- name: download tracked files and build artifacts
uses: actions/download-artifact@v3
uses: actions/download-artifact@v4
with:
name: vs-artifacts
path: ${{github.workspace}}
@ -248,7 +248,7 @@ jobs:
run: ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: failed-tests-windows
path: ${{env.FAILED_TEST_ARTIFACTS}}
@ -297,7 +297,7 @@ jobs:
runs_on_pool: ${{matrix.vector.pool}}
runs-on: ${{matrix.vector.pool}}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- run: ci/install-dependencies.sh
- run: ci/run-build-and-tests.sh
- name: print test failures
@ -305,7 +305,7 @@ jobs:
run: ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != ''
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: failed-tests-${{matrix.vector.jobname}}
path: ${{env.FAILED_TEST_ARTIFACTS}}
@ -317,7 +317,7 @@ jobs:
CC: clang
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- run: ci/install-dependencies.sh
- run: ci/run-build-and-minimal-fuzzers.sh
dockerized:
@ -342,9 +342,9 @@ jobs:
runs-on: ubuntu-latest
container: ${{matrix.vector.image}}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
if: matrix.vector.jobname != 'linux32'
- uses: actions/checkout@v1
- uses: actions/checkout@v1 # cannot be upgraded because Node.js Actions aren't supported in this container
if: matrix.vector.jobname == 'linux32'
- run: ci/install-docker-dependencies.sh
- run: ci/run-build-and-tests.sh
@ -353,13 +353,13 @@ jobs:
run: ci/print-test-failures.sh
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != '' && matrix.vector.jobname != 'linux32'
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: failed-tests-${{matrix.vector.jobname}}
path: ${{env.FAILED_TEST_ARTIFACTS}}
- name: Upload failed tests' directories
if: failure() && env.FAILED_TEST_ARTIFACTS != '' && matrix.vector.jobname == 'linux32'
uses: actions/upload-artifact@v1
uses: actions/upload-artifact@v1 # cannot be upgraded because Node.js Actions aren't supported in this container
with:
name: failed-tests-${{matrix.vector.jobname}}
path: ${{env.FAILED_TEST_ARTIFACTS}}
@ -373,7 +373,7 @@ jobs:
group: static-analysis-${{ github.ref }}
cancel-in-progress: ${{ needs.ci-config.outputs.skip_concurrent == 'yes' }}
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- run: ci/install-dependencies.sh
- run: ci/run-static-analysis.sh
- run: ci/check-directional-formatting.bash
@ -396,7 +396,7 @@ jobs:
artifact: sparse-20.04
- name: Install the current `sparse` package
run: sudo dpkg -i sparse-20.04/sparse_*.deb
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Install other dependencies
run: ci/install-dependencies.sh
- run: make sparse
@ -411,6 +411,6 @@ jobs:
jobname: Documentation
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- run: ci/install-dependencies.sh
- run: ci/test-documentation.sh

View file

@ -0,0 +1,37 @@
Git 2.43.2 Release Notes
========================
Relative to Git 2.43.1, this release has two important fixes to allow
"git imap-send" to be built with NO_CURL defined, and to restore the
forced flushing behaviour when GIT_FLUSH=1 is set. It also contains
other, unexciting, fixes that have already been merged to the 'master'
branch of the development towards the next major release.
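As a hedged illustration of the NO_CURL fix mentioned above (the variable value is the conventional Makefile toggle; adapt it to your own build setup):

    # Build Git without libcurl; with 2.43.2, "git imap-send" builds again
    # in this configuration.
    $ make NO_CURL=YesPlease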
Fixes since Git 2.43.1
----------------------
* Update to a new feature recently added, "git show-ref --exists".
* Rename detection logic ignored the final line of a file if it is an
incomplete line.
* "git diff --no-rename A B" did not disable rename detection but did
not trigger an error from the command line parser.
* "git diff --no-index file1 file2" segfaulted while invoking the
external diff driver, which has been corrected.
* Rewrite //-comments to /* comments */ in files whose comments
prevalently use the latter.
* A failed "git tag -s" did not necessarily result in an error
depending on the crypto backend, which has been corrected.
* "git stash" sometimes was silent even when it failed due to
unwritable index file, which has been corrected.
* Recent conversion to allow more than 0/1 in GIT_FLUSH broke the
mechanism by flipping what yes/no means by mistake, which has been
corrected.
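A minimal sketch of the restored behaviour, for readers who rely on GIT_FLUSH; the output path is made up for the example:

    # GIT_FLUSH=1 forces commands such as "git log" and "git rev-list"
    # to flush their output after each record, which matters when a
    # consumer reads the stream incrementally.
    $ GIT_FLUSH=1 git rev-list --all | head -3
    # GIT_FLUSH=0 lets Git use fully buffered output instead.
    $ GIT_FLUSH=0 git rev-list --all > /tmp/revs.txt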
Also contains documentation updates, code clean-ups and minor fixups.

View file

@ -91,6 +91,17 @@ UI, Workflows & Features
refresh token the same way as credential-cache and
credential-libsecret backends.
* Command line completion support (in contrib/) has been
updated for "git bisect".
* "git branch" and friends learned to use the formatted text as
sorting key, not the underlying timestamp value, when the --sort
option is used with author or committer timestamp with a format
specifier (e.g., "--sort=creatordate:format:%H:%M:%S").
* The command line completion script (in contrib/) learned to
complete configuration variable names better.
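A hedged sketch of the `--sort` behaviour referenced above; the chosen formats are only illustrative:

    # Sort branches by the *formatted* committer date string, compared
    # byte by byte, rather than by the underlying timestamp value.
    $ git branch --sort=committerdate:format:'%H:%M:%S'
    # The same format suffix works with other date atoms, e.g. creatordate:
    $ git branch --sort=creatordate:format:'%Y-%m-%d'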
Performance, Internal Implementation, Development Support etc.
@ -151,6 +162,18 @@ Performance, Internal Implementation, Development Support etc.
* The priority queue test has been migrated to the unit testing
framework.
* Setting `feature.experimental` opts the user into the multi-pack
  reuse experiment.
* Squelch node.js 16 deprecation warnings from GitHub Actions CI
  by updating actions/github-script and actions/checkout to versions
  that use node.js 20.
* The mechanism to report the filename in the source code, used by
  the unit-test machinery, assumed that the compiler expanded __FILE__
  to the path of the source as given to $(CC), but some compilers
  give the full path, breaking the output. This has been corrected.
Fixes since v2.43
-----------------
@ -234,21 +257,18 @@ Fixes since v2.43
data from commit-graph too early, which has been corrected.
* Update to a new feature recently added, "git show-ref --exists".
(merge 0aabeaa562 tc/show-ref-exists-fix later to maint).
* oss-fuzz tests are built and run in CI.
(merge c4a9cf1df3 js/oss-fuzz-build-in-ci later to maint).
* Rename detection logic ignored the final line of a file if it is an
incomplete line.
(merge 1c5bc6971e en/diffcore-delta-final-line-fix later to maint).
* GitHub CI update.
(merge 0188b2c8e0 pb/ci-github-skip-logs-for-broken-tests later to maint).
* "git diff --no-rename A B" did not disable rename detection but did
not trigger an error from the command line parser.
(merge 457f96252f rs/parse-options-with-keep-unknown-abbrev-fix later to maint).
* "git archive --remote=<remote>" learned to talk over the smart
http (aka stateless) transport.
@ -265,11 +285,9 @@ Fixes since v2.43
* "git diff --no-index file1 file2" segfaulted while invoking the
external diff driver, which has been corrected.
(merge 85a9a63c92 jk/diff-external-with-no-index later to maint).
* Rewrite //-comments to /* comments */ in files whose comments
prevalently use the latter.
(merge de65079d7b jc/comment-style-fixes later to maint).
* Cirrus CI jobs started breaking because we specified a version of
  FreeBSD that is no longer available, which has been corrected.
@ -279,17 +297,37 @@ Fixes since v2.43
as <ptr, length> with a wrong length, which has been corrected.
(merge 156e28b36d jh/sparse-index-expand-to-path-fix later to maint).
* A failed "git tag -s" did not necessarily result in an error
depending on the crypto backend, which has been corrected.
* "git stash" sometimes was silent even when it failed due to
unwritable index file, which has been corrected.
* "git show-ref --verify" did not show things like "CHERRY_PICK_HEAD",
which has been corrected.
* Recent conversion to allow more than 0/1 in GIT_FLUSH broke the
mechanism by flipping what yes/no means by mistake, which has been
corrected.
* The sequencer machinery does not use the ref API and instead
records names of certain objects it needs for its correct operation
in temporary files, which makes these objects susceptible to loss
by garbage collection. These temporary files have been added as
starting points for reachability analysis to fix this.
(merge bc7f5db896 pw/gc-during-rebase later to maint).
* "git cherry-pick" invoked during "git rebase -i" session lost
the authorship information, which has been corrected.
(merge e4301f73ff vn/rebase-with-cherry-pick-authorship later to maint).
* The code paths that call repo_read_object_file() have been
tightened to react to errors.
(merge 568459bf5e js/check-null-from-read-object-file later to maint).
* Other code cleanup, docfix, build fix, etc.
(merge 5aea3955bc rj/clarify-branch-doc-m later to maint).
(merge 9cce3be2df bk/bisect-doc-fix later to maint).
(merge 8f50984cf4 ne/doc-filter-blob-limit-fix later to maint).
(merge f10b0989b8 la/strvec-comment-fix later to maint).
(merge 8430b438f6 vd/fsck-submodule-url-test later to maint).
(merge f10031fadd nb/rebase-x-shell-docfix later to maint).
(merge af3d2c160f jc/majordomo-to-subspace later to maint).
(merge ee9895b0ff sd/negotiate-trace-fix later to maint).
(merge 976d0251ce jc/coc-whitespace-fix later to maint).
(merge 9023198280 jt/p4-spell-re-with-raw-string later to maint).
(merge 36c9c44fa4 tb/pack-bitmap-drop-unused-struct-member later to maint).
(merge 19ed0dff8f js/win32-retry-pipe-write-on-enospc later to maint).
(merge 3cb4384683 jc/t0091-with-unknown-git later to maint).
(merge 020456cb74 rs/receive-pack-remove-find-header later to maint).

View file

@ -17,6 +17,9 @@ skipping more commits at a time, reducing the number of round trips.
+
* `pack.useBitmapBoundaryTraversal=true` may improve bitmap traversal times by
walking fewer objects.
+
* `pack.allowPackReuse=multi` may improve the time it takes to create a pack by
reusing objects from multiple packs instead of just one.
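A minimal sketch of opting in, assuming a repository where a multi-pack-index bitmap can be generated; none of this is prescribed by the documentation above:

    # Opt into all experimental defaults, which include multi-pack reuse:
    $ git config feature.experimental true
    # ...or enable only this particular knob:
    $ git config pack.allowPackReuse multi
    # Reuse across several packs relies on a multi-pack-index bitmap:
    $ git multi-pack-index write --bitmap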
feature.manyFiles::
Enable config options that optimize for repos with many files in the

View file

@ -16,11 +16,11 @@ DESCRIPTION
The command takes various subcommands, and different options depending
on the subcommand:
git bisect start [--term-(new|bad)=<term-new> --term-(old|good)=<term-old>]
git bisect start [--term-(bad|new)=<term-new> --term-(good|old)=<term-old>]
[--no-checkout] [--first-parent] [<bad> [<good>...]] [--] [<pathspec>...]
git bisect (bad|new|<term-new>) [<rev>]
git bisect (good|old|<term-old>) [<rev>...]
git bisect terms [--term-good | --term-bad]
git bisect terms [--term-(good|old) | --term-(bad|new)]
git bisect skip [(<rev>|<range>)...]
git bisect reset [<commit>]
git bisect (visualize|view)
@ -165,8 +165,10 @@ To get a reminder of the currently used terms, use
git bisect terms
------------------------------------------------
You can get just the old (respectively new) term with `git bisect terms
--term-old` or `git bisect terms --term-good`.
You can get just the old term with `git bisect terms --term-old`
or `git bisect terms --term-good`; `git bisect terms --term-new`
and `git bisect terms --term-bad` can be used to learn how to call
the commits more recent than the sought change.
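A short, hedged example of querying the terms during a bisection that uses custom terms; the term names "broken" and "works" are made up for the illustration:

    # Start a bisection with custom terms for "new"/"bad" and "old"/"good":
    $ git bisect start --term-new=broken --term-old=works
    # Ask which term names the newer/bad side and which the older/good side:
    $ git bisect terms --term-bad
    broken
    $ git bisect terms --term-good
    works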
If you would like to use your own terms instead of "bad"/"good" or
"new"/"old", you can choose any names you like (except existing bisect

View file

@ -359,9 +359,11 @@ In any case, a field name that refers to a field inapplicable to
the object referred by the ref does not cause an error. It
returns an empty string instead.
As a special case for the date-type fields, you may specify a format for
the date by adding `:` followed by date format name (see the
values the `--date` option to linkgit:git-rev-list[1] takes).
As a special case for the date-type fields, you may specify a format for the
date by adding `:` followed by date format name (see the values the `--date`
option to linkgit:git-rev-list[1] takes). If this formatting is provided in
a `--sort` key, references will be sorted according to the byte-value of the
formatted string rather than the numeric value of the underlying timestamp.
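The byte-value ordering described above has a practical consequence worth a hedged example; the refs and formats are illustrative only:

    # A day-first format is compared as a string, so it will not sort chronologically:
    $ git for-each-ref --sort=creatordate:format:'%d/%m/%Y' refs/tags
    # An ISO-like format keeps string order and chronological order aligned:
    $ git for-each-ref --sort=creatordate:format:'%Y-%m-%dT%H:%M:%S' refs/tags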
Some atoms like %(align) and %(if) always require a matching %(end).
We call them "opening atoms" and sometimes denote them as %($open).

View file

@ -1,7 +1,7 @@
#!/bin/sh
GVF=GIT-VERSION-FILE
DEF_VER=v2.44.0-rc0
DEF_VER=v2.44.0-rc1
LF='
'

View file

@ -158,6 +158,9 @@ static void show_list(const char *debug, int counted, int nr,
const char *subject_start;
int subject_len;
if (!buf)
die(_("unable to read %s"), oid_to_hex(&commit->object.oid));
fprintf(stderr, "%c%c%c ",
(commit_flags & TREESAME) ? ' ' : 'T',
(commit_flags & UNINTERESTING) ? 'U' : ' ',

View file

@ -221,6 +221,10 @@ static int cat_one_file(int opt, const char *exp_type, const char *obj_name,
&type,
&size);
const char *target;
if (!buffer)
die(_("unable to read %s"), oid_to_hex(&oid));
if (!skip_prefix(buffer, "object ", &target) ||
get_oid_hex(target, &blob_oid))
die("%s not a valid tag", oid_to_hex(&oid));
@ -416,6 +420,8 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
contents = repo_read_object_file(the_repository, oid, &type,
&size);
if (!contents)
die("object %s disappeared", oid_to_hex(oid));
if (use_mailmap) {
size_t s = size;
@ -423,8 +429,6 @@ static void print_object_or_die(struct batch_options *opt, struct expand_data *d
size = cast_size_t_to_ulong(s);
}
if (!contents)
die("object %s disappeared", oid_to_hex(oid));
if (type != data->type)
die("object %s changed type!?", oid_to_hex(oid));
if (data->info.sizep && size != data->size && !use_mailmap)
@ -481,6 +485,8 @@ static void batch_object_write(const char *obj_name,
buf = repo_read_object_file(the_repository, &data->oid, &data->type,
&data->size);
if (!buf)
die(_("unable to read %s"), oid_to_hex(&data->oid));
buf = replace_idents_using_mailmap(buf, &s);
data->size = cast_size_t_to_ulong(s);

View file

@ -571,6 +571,8 @@ static int grep_cache(struct grep_opt *opt,
data = repo_read_object_file(the_repository, &ce->oid,
&type, &size);
if (!data)
die(_("unable to read tree %s"), oid_to_hex(&ce->oid));
init_tree_desc(&tree, data, size);
hit |= grep_tree(opt, pathspec, &tree, &name, 0, 0);

View file

@ -716,9 +716,11 @@ static int append_edit(int argc, const char **argv, const char *prefix)
struct strbuf buf = STRBUF_INIT;
char *prev_buf = repo_read_object_file(the_repository, note, &type, &size);
if (prev_buf && size)
if (!prev_buf)
die(_("unable to read %s"), oid_to_hex(note));
if (size)
strbuf_add(&buf, prev_buf, size);
if (d.buf.len && prev_buf && size)
if (d.buf.len && size)
append_separator(&buf);
strbuf_insert(&d.buf, 0, buf.buf, buf.len);

View file

@ -4396,6 +4396,8 @@ int cmd_pack_objects(int argc, const char **argv, const char *prefix)
prepare_repo_settings(the_repository);
if (sparse < 0)
sparse = the_repository->settings.pack_use_sparse;
if (the_repository->settings.pack_use_multi_pack_reuse)
allow_pack_reuse = MULTI_PACK_REUSE;
}
reset_pack_idx_option(&pack_idx_opts);

View file

@ -593,21 +593,6 @@ static char *prepare_push_cert_nonce(const char *path, timestamp_t stamp)
return strbuf_detach(&buf, NULL);
}
static char *find_header(const char *msg, size_t len, const char *key,
const char **next_line)
{
size_t out_len;
const char *val = find_header_mem(msg, len, key, &out_len);
if (!val)
return NULL;
if (next_line)
*next_line = val + out_len + 1;
return xmemdupz(val, out_len);
}
/*
* Return zero if a and b are equal up to n bytes and nonzero if they are not.
* This operation is guaranteed to run in constant time to avoid leaking data.
@ -622,13 +607,14 @@ static int constant_memequal(const char *a, const char *b, size_t n)
return res;
}
static const char *check_nonce(const char *buf, size_t len)
static const char *check_nonce(const char *buf)
{
char *nonce = find_header(buf, len, "nonce", NULL);
size_t noncelen;
const char *found = find_commit_header(buf, "nonce", &noncelen);
char *nonce = found ? xmemdupz(found, noncelen) : NULL;
timestamp_t stamp, ostamp;
char *bohmac, *expect = NULL;
const char *retval = NONCE_BAD;
size_t noncelen;
if (!nonce) {
retval = NONCE_MISSING;
@ -670,7 +656,6 @@ static const char *check_nonce(const char *buf, size_t len)
goto leave;
}
noncelen = strlen(nonce);
expect = prepare_push_cert_nonce(service_dir, stamp);
if (noncelen != strlen(expect)) {
/* This is not even the right size. */
@ -718,35 +703,28 @@ static const char *check_nonce(const char *buf, size_t len)
static int check_cert_push_options(const struct string_list *push_options)
{
const char *buf = push_cert.buf;
int len = push_cert.len;
char *option;
const char *next_line;
const char *option;
size_t optionlen;
int options_seen = 0;
int retval = 1;
if (!len)
if (!*buf)
return 1;
while ((option = find_header(buf, len, "push-option", &next_line))) {
len -= (next_line - buf);
buf = next_line;
while ((option = find_commit_header(buf, "push-option", &optionlen))) {
buf = option + optionlen + 1;
options_seen++;
if (options_seen > push_options->nr
|| strcmp(option,
push_options->items[options_seen - 1].string)) {
retval = 0;
goto leave;
}
free(option);
|| xstrncmpz(push_options->items[options_seen - 1].string,
option, optionlen))
return 0;
}
if (options_seen != push_options->nr)
retval = 0;
leave:
free(option);
return retval;
}
@ -773,7 +751,7 @@ static void prepare_push_cert_sha1(struct child_process *proc)
check_signature(&sigcheck, push_cert.buf + bogs,
push_cert.len - bogs);
nonce_status = check_nonce(push_cert.buf, bogs);
nonce_status = check_nonce(sigcheck.payload);
}
if (!is_null_oid(&push_cert_oid)) {
strvec_pushf(&proc->env, "GIT_PUSH_CERT=%s",

View file

@ -172,7 +172,7 @@ static int cmd_show_ref__verify(const struct show_one_options *show_one_opts,
while (*refs) {
struct object_id oid;
if ((starts_with(*refs, "refs/") || !strcmp(*refs, "HEAD")) &&
if ((starts_with(*refs, "refs/") || refname_is_safe(*refs)) &&
!read_ref(*refs, &oid)) {
show_one(show_one_opts, *refs, &oid);
}

View file

@ -520,7 +520,7 @@ static void unstage_changes_unless_new(struct object_id *orig_tree)
repo_hold_locked_index(the_repository, &lock, LOCK_DIE_ON_ERROR);
if (write_locked_index(&the_index, &lock,
COMMIT_LOCK | SKIP_IF_UNCHANGED))
die(_("Unable to write index."));
die(_("could not write index"));
}
static int do_apply_stash(const char *prefix, struct stash_info *info,
@ -537,7 +537,7 @@ static int do_apply_stash(const char *prefix, struct stash_info *info,
repo_read_index_preload(the_repository, NULL, 0);
if (repo_refresh_and_write_index(the_repository, REFRESH_QUIET, 0, 0,
NULL, NULL, NULL))
return -1;
return error(_("could not write index"));
if (write_index_as_tree(&c_tree, &the_index, get_index_file(), 0,
NULL))
@ -1364,7 +1364,7 @@ static int do_create_stash(const struct pathspec *ps, struct strbuf *stash_msg_b
repo_read_index_preload(the_repository, NULL, 0);
if (repo_refresh_and_write_index(the_repository, REFRESH_QUIET, 0, 0,
NULL, NULL, NULL) < 0) {
ret = -1;
ret = error(_("could not write index"));
goto done;
}
@ -1555,7 +1555,7 @@ static int do_push_stash(const struct pathspec *ps, const char *stash_msg, int q
if (repo_refresh_and_write_index(the_repository, REFRESH_QUIET, 0, 0,
NULL, NULL, NULL)) {
ret = -1;
ret = error(_("could not write index"));
goto done;
}

View file

@ -153,7 +153,7 @@ static int verify_tag(const char *name, const char *ref UNUSED,
static int do_sign(struct strbuf *buffer)
{
return sign_buffer(buffer, buffer, get_signing_key());
return sign_buffer(buffer, buffer, get_signing_key()) ? -1 : 0;
}
static const char tag_template[] =

View file

@ -337,6 +337,8 @@ static char *grab_blob(struct repository *r,
free_filespec(df);
} else {
blob = repo_read_object_file(r, oid, &type, size);
if (!blob)
die(_("unable to read %s"), oid_to_hex(oid));
if (type != OBJ_BLOB)
die("object '%s' is not a blob!", oid_to_hex(oid));
}

View file

@ -1483,12 +1483,32 @@ _git_bisect ()
{
__git_has_doubledash && return
local subcommands="start bad good skip reset visualize replay log run"
local subcommand="$(__git_find_on_cmdline "$subcommands")"
__git_find_repo_path
# If a bisection is in progress get the terms being used.
local term_bad term_good
if [ -f "$__git_repo_path"/BISECT_TERMS ]; then
term_bad=$(__git bisect terms --term-bad)
term_good=$(__git bisect terms --term-good)
fi
# We will complete any custom terms, but still always complete the
# more usual bad/new/good/old because git bisect gives a good error
# message if these are given when not in use, and that's better than
# silent refusal to complete if the user is confused.
#
# We want to recognize 'view' but not complete it, because it overlaps
# with 'visualize' too much and is just an alias for it.
#
local completable_subcommands="start bad new $term_bad good old $term_good terms skip reset visualize replay log run help"
local all_subcommands="$completable_subcommands view"
local subcommand="$(__git_find_on_cmdline "$all_subcommands")"
if [ -z "$subcommand" ]; then
__git_find_repo_path
if [ -f "$__git_repo_path"/BISECT_START ]; then
__gitcomp "$subcommands"
__gitcomp "$completable_subcommands"
else
__gitcomp "replay start"
fi
@ -1496,7 +1516,26 @@ _git_bisect ()
fi
case "$subcommand" in
bad|good|reset|skip|start)
start)
case "$cur" in
--*)
__gitcomp "--first-parent --no-checkout --term-new --term-bad --term-old --term-good"
return
;;
*)
__git_complete_refs
;;
esac
;;
terms)
__gitcomp "--term-good --term-old --term-bad --term-new"
return
;;
visualize|view)
__git_complete_log_opts
return
;;
bad|new|"$term_bad"|good|old|"$term_good"|reset|skip)
__git_complete_refs
;;
*)
@ -2105,10 +2144,12 @@ __git_diff_merges_opts="off none on first-parent 1 separate m combined c dense-c
__git_log_pretty_formats="oneline short medium full fuller reference email raw format: tformat: mboxrd"
__git_log_date_formats="relative iso8601 iso8601-strict rfc2822 short local default human raw unix auto: format:"
_git_log ()
# Complete porcelain (i.e. not git-rev-list) options and at least some
# option arguments accepted by git-log. Note that this same set of options
# is also accepted by some other git commands besides git-log.
__git_complete_log_opts ()
{
__git_has_doubledash && return
__git_find_repo_path
COMPREPLY=()
local merge=""
if __git_pseudoref_exists MERGE_HEAD; then
@ -2204,6 +2245,16 @@ _git_log ()
return
;;
esac
}
_git_log ()
{
__git_has_doubledash && return
__git_find_repo_path
__git_complete_log_opts
[ ${#COMPREPLY[@]} -eq 0 ] || return
__git_complete_revlist
}
@ -2609,6 +2660,31 @@ __git_compute_config_vars ()
__git_config_vars="$(git help --config-for-completion)"
}
__git_config_vars_all=
__git_compute_config_vars_all ()
{
test -n "$__git_config_vars_all" ||
__git_config_vars_all="$(git --no-pager help --config)"
}
__git_compute_first_level_config_vars_for_section ()
{
local section="$1"
__git_compute_config_vars
local this_section="__git_first_level_config_vars_for_section_${section}"
test -n "${!this_section}" ||
printf -v "__git_first_level_config_vars_for_section_${section}" %s "$(echo "$__git_config_vars" | grep -E "^${section}\.[a-z]" | awk -F. '{print $2}')"
}
__git_compute_second_level_config_vars_for_section ()
{
local section="$1"
__git_compute_config_vars_all
local this_section="__git_second_level_config_vars_for_section_${section}"
test -n "${!this_section}" ||
printf -v "__git_second_level_config_vars_for_section_${section}" %s "$(echo "$__git_config_vars_all" | grep -E "^${section}\.<" | awk -F. '{print $3}')"
}
__git_config_sections=
__git_compute_config_sections ()
{
@ -2753,73 +2829,50 @@ __git_complete_config_variable_name ()
done
case "$cur_" in
branch.*.*)
branch.*.*|guitool.*.*|difftool.*.*|man.*.*|mergetool.*.*|remote.*.*|submodule.*.*|url.*.*)
local pfx="${cur_%.*}."
cur_="${cur_##*.}"
__gitcomp "remote pushRemote merge mergeOptions rebase" "$pfx" "$cur_" "$sfx"
local section="${pfx%.*.}"
__git_compute_second_level_config_vars_for_section "${section}"
local this_section="__git_second_level_config_vars_for_section_${section}"
__gitcomp "${!this_section}" "$pfx" "$cur_" "$sfx"
return
;;
branch.*)
local pfx="${cur_%.*}."
cur_="${cur_#*.}"
local section="${pfx%.}"
__gitcomp_direct "$(__git_heads "$pfx" "$cur_" ".")"
__gitcomp_nl_append $'autoSetupMerge\nautoSetupRebase\n' "$pfx" "$cur_" "${sfx- }"
return
;;
guitool.*.*)
local pfx="${cur_%.*}."
cur_="${cur_##*.}"
__gitcomp "
argPrompt cmd confirm needsFile noConsole noRescan
prompt revPrompt revUnmerged title
" "$pfx" "$cur_" "$sfx"
return
;;
difftool.*.*)
local pfx="${cur_%.*}."
cur_="${cur_##*.}"
__gitcomp "cmd path" "$pfx" "$cur_" "$sfx"
return
;;
man.*.*)
local pfx="${cur_%.*}."
cur_="${cur_##*.}"
__gitcomp "cmd path" "$pfx" "$cur_" "$sfx"
return
;;
mergetool.*.*)
local pfx="${cur_%.*}."
cur_="${cur_##*.}"
__gitcomp "cmd path trustExitCode" "$pfx" "$cur_" "$sfx"
__git_compute_first_level_config_vars_for_section "${section}"
local this_section="__git_first_level_config_vars_for_section_${section}"
__gitcomp_nl_append "${!this_section}" "$pfx" "$cur_" "${sfx:- }"
return
;;
pager.*)
local pfx="${cur_%.*}."
cur_="${cur_#*.}"
__git_compute_all_commands
__gitcomp_nl "$__git_all_commands" "$pfx" "$cur_" "${sfx- }"
return
;;
remote.*.*)
local pfx="${cur_%.*}."
cur_="${cur_##*.}"
__gitcomp "
url proxy fetch push mirror skipDefaultUpdate
receivepack uploadpack tagOpt pushurl
" "$pfx" "$cur_" "$sfx"
__gitcomp_nl "$__git_all_commands" "$pfx" "$cur_" "${sfx:- }"
return
;;
remote.*)
local pfx="${cur_%.*}."
cur_="${cur_#*.}"
local section="${pfx%.}"
__gitcomp_nl "$(__git_remotes)" "$pfx" "$cur_" "."
__gitcomp_nl_append "pushDefault" "$pfx" "$cur_" "${sfx- }"
__git_compute_first_level_config_vars_for_section "${section}"
local this_section="__git_first_level_config_vars_for_section_${section}"
__gitcomp_nl_append "${!this_section}" "$pfx" "$cur_" "${sfx:- }"
return
;;
url.*.*)
submodule.*)
local pfx="${cur_%.*}."
cur_="${cur_##*.}"
__gitcomp "insteadOf pushInsteadOf" "$pfx" "$cur_" "$sfx"
cur_="${cur_#*.}"
local section="${pfx%.}"
__gitcomp_nl "$(__git config -f "$(__git rev-parse --show-toplevel)/.gitmodules" --get-regexp 'submodule.*.path' | awk -F. '{print $2}')" "$pfx" "$cur_" "."
__git_compute_first_level_config_vars_for_section "${section}"
local this_section="__git_first_level_config_vars_for_section_${section}"
__gitcomp_nl_append "${!this_section}" "$pfx" "$cur_" "${sfx:- }"
return
;;
*.*)

View file

@ -1078,7 +1078,7 @@ static int sign_buffer_ssh(struct strbuf *buffer, struct strbuf *signature,
if (strstr(signer_stderr.buf, "usage:"))
error(_("ssh-keygen -Y sign is needed for ssh signing (available in openssh version 8.2p1+)"));
error("%s", signer_stderr.buf);
ret = error("%s", signer_stderr.buf);
goto out;
}

View file

@ -66,7 +66,7 @@ size_t parse_signed_buffer(const char *buf, size_t size);
* Create a detached signature for the contents of "buffer" and append
* it after "signature"; "buffer" and "signature" can be the same
* strbuf instance, which would cause the detached signature appended
* at the end.
* at the end. Returns 0 on success, non-zero on failure.
*/
int sign_buffer(struct strbuf *buffer, struct strbuf *signature,
const char *signing_key);

View file

@ -17,6 +17,7 @@
#include "pack-mtimes.h"
#include "config.h"
#include "run-command.h"
#include "sequencer.h"
struct connectivity_progress {
struct progress *progress;
@ -30,6 +31,52 @@ static void update_progress(struct connectivity_progress *cp)
display_progress(cp->progress, cp->count);
}
static void add_one_file(const char *path, struct rev_info *revs)
{
struct strbuf buf = STRBUF_INIT;
struct object_id oid;
struct object *object;
if (!read_oneliner(&buf, path, READ_ONELINER_SKIP_IF_EMPTY)) {
strbuf_release(&buf);
return;
}
strbuf_trim(&buf);
if (!get_oid_hex(buf.buf, &oid)) {
object = parse_object_or_die(&oid, buf.buf);
add_pending_object(revs, object, "");
}
strbuf_release(&buf);
}
/* Mark objects recorded in rebase state files as reachable. */
static void add_rebase_files(struct rev_info *revs)
{
struct strbuf buf = STRBUF_INIT;
size_t len;
const char *path[] = {
"rebase-apply/autostash",
"rebase-apply/orig-head",
"rebase-merge/autostash",
"rebase-merge/orig-head",
};
struct worktree **worktrees = get_worktrees();
for (struct worktree **wt = worktrees; *wt; wt++) {
strbuf_reset(&buf);
strbuf_addstr(&buf, get_worktree_git_dir(*wt));
strbuf_complete(&buf, '/');
len = buf.len;
for (size_t i = 0; i < ARRAY_SIZE(path); i++) {
strbuf_setlen(&buf, len);
strbuf_addstr(&buf, path[i]);
add_one_file(buf.buf, revs);
}
}
strbuf_release(&buf);
free_worktrees(worktrees);
}
static int add_one_ref(const char *path, const struct object_id *oid,
int flag, void *cb_data)
{
@ -322,6 +369,9 @@ void mark_reachable_objects(struct rev_info *revs, int mark_reflog,
head_ref(add_one_ref, revs);
other_head_refs(add_one_ref, revs);
/* rebase autostash and orig-head */
add_rebase_files(revs);
/* Add all reflog info */
if (mark_reflog)
add_reflogs_to_pending(revs, 0);

View file

@ -1611,6 +1611,12 @@ static void grab_date(const char *buf, struct atom_value *v, const char *atomnam
if (formatp) {
formatp++;
parse_date_format(formatp, &date_mode);
/*
* If this is a sort field and a format was specified, we'll
* want to compare formatted date by string value.
*/
v->atom->type = FIELD_STR;
}
if (!eoemail)

View file

@ -64,12 +64,11 @@ void free_names(char **a)
reftable_free(a);
}
int names_length(char **names)
size_t names_length(char **names)
{
char **p = names;
for (; *p; p++) {
/* empty */
}
while (*p)
p++;
return p - names;
}
@ -89,17 +88,13 @@ void parse_names(char *buf, int size, char ***namesp)
next = end;
}
if (p < next) {
if (names_len == names_cap) {
names_cap = 2 * names_cap + 1;
names = reftable_realloc(
names, names_cap * sizeof(*names));
}
REFTABLE_ALLOC_GROW(names, names_len + 1, names_cap);
names[names_len++] = xstrdup(p);
}
p = next + 1;
}
names = reftable_realloc(names, (names_len + 1) * sizeof(*names));
REFTABLE_REALLOC_ARRAY(names, names_len + 1);
names[names_len] = NULL;
*namesp = names;
}

View file

@ -44,14 +44,27 @@ void parse_names(char *buf, int size, char ***namesp);
int names_equal(char **a, char **b);
/* returns the array size of a NULL-terminated array of strings. */
int names_length(char **names);
size_t names_length(char **names);
/* Allocation routines; they invoke the functions set through
* reftable_set_alloc() */
void *reftable_malloc(size_t sz);
void *reftable_realloc(void *p, size_t sz);
void reftable_free(void *p);
void *reftable_calloc(size_t sz);
void *reftable_calloc(size_t nelem, size_t elsize);
#define REFTABLE_ALLOC_ARRAY(x, alloc) (x) = reftable_malloc(st_mult(sizeof(*(x)), (alloc)))
#define REFTABLE_CALLOC_ARRAY(x, alloc) (x) = reftable_calloc((alloc), sizeof(*(x)))
#define REFTABLE_REALLOC_ARRAY(x, alloc) (x) = reftable_realloc((x), st_mult(sizeof(*(x)), (alloc)))
#define REFTABLE_ALLOC_GROW(x, nr, alloc) \
do { \
if ((nr) > alloc) { \
alloc = 2 * (alloc) + 1; \
if (alloc < (nr)) \
alloc = (nr); \
REFTABLE_REALLOC_ARRAY(x, alloc); \
} \
} while (0)
/* Find the longest shared prefix size of `a` and `b` */
struct strbuf;

View file

@ -51,12 +51,7 @@ static int block_writer_register_restart(struct block_writer *w, int n,
if (2 + 3 * rlen + n > w->block_size - w->next)
return -1;
if (is_restart) {
if (w->restart_len == w->restart_cap) {
w->restart_cap = w->restart_cap * 2 + 1;
w->restarts = reftable_realloc(
w->restarts, sizeof(uint32_t) * w->restart_cap);
}
REFTABLE_ALLOC_GROW(w->restarts, w->restart_len + 1, w->restart_cap);
w->restarts[w->restart_len++] = w->next;
}
@ -148,8 +143,10 @@ int block_writer_finish(struct block_writer *w)
int block_header_skip = 4 + w->header_off;
uLongf src_len = w->next - block_header_skip;
uLongf dest_cap = src_len * 1.001 + 12;
uint8_t *compressed;
REFTABLE_ALLOC_ARRAY(compressed, dest_cap);
uint8_t *compressed = reftable_malloc(dest_cap);
while (1) {
uLongf out_dest_len = dest_cap;
int zresult = compress2(compressed, &out_dest_len,
@ -206,9 +203,9 @@ int block_reader_init(struct block_reader *br, struct reftable_block *block,
uLongf dst_len = sz - block_header_skip; /* total size of dest
buffer. */
uLongf src_len = block->len - block_header_skip;
/* Log blocks specify the *uncompressed* size in their header.
*/
uncompressed = reftable_malloc(sz);
/* Log blocks specify the *uncompressed* size in their header. */
REFTABLE_ALLOC_ARRAY(uncompressed, sz);
/* Copy over the block header verbatim. It's not compressed. */
memcpy(uncompressed, block->data, block_header_skip);
@ -385,23 +382,23 @@ int block_reader_seek(struct block_reader *br, struct block_iter *it,
.key = *want,
.r = br,
};
struct reftable_record rec = reftable_new_record(block_reader_type(br));
int err = 0;
struct block_iter next = BLOCK_ITER_INIT;
struct reftable_record rec;
int err = 0, i;
int i = binsearch(br->restart_count, &restart_key_less, &args);
if (args.error) {
err = REFTABLE_FORMAT_ERROR;
goto done;
}
it->br = br;
if (i > 0) {
i--;
it->next_off = block_reader_restart_offset(br, i);
} else {
i = binsearch(br->restart_count, &restart_key_less, &args);
if (i > 0)
it->next_off = block_reader_restart_offset(br, i - 1);
else
it->next_off = br->header_off + 4;
}
it->br = br;
reftable_record_init(&rec, block_reader_type(br));
/* We're looking for the last entry less/equal than the wanted key, so
we have to go one entry too far and then back up.

View file

@ -36,7 +36,7 @@ static void test_block_read_write(void)
int j = 0;
struct strbuf want = STRBUF_INIT;
block.data = reftable_calloc(block_size);
REFTABLE_CALLOC_ARRAY(block.data, block_size);
block.len = block_size;
block.source = malloc_block_source();
block_writer_init(&bw, BLOCK_TYPE_REF, block.data, block_size,

View file

@ -29,7 +29,7 @@ static int strbuf_read_block(void *v, struct reftable_block *dest, uint64_t off,
{
struct strbuf *b = v;
assert(off + size <= b->len);
dest->data = reftable_calloc(size);
REFTABLE_CALLOC_ARRAY(dest->data, size);
memcpy(dest->data, b->buf + off, size);
dest->len = size;
return size;
@ -132,7 +132,7 @@ int reftable_block_source_from_file(struct reftable_block_source *bs,
return REFTABLE_IO_ERROR;
}
p = reftable_calloc(sizeof(*p));
REFTABLE_CALLOC_ARRAY(p, 1);
p->size = st.st_size;
p->data = xmmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
close(fd);

View file

@ -160,8 +160,7 @@ int new_indexed_table_ref_iter(struct indexed_table_ref_iter **dest,
int oid_len, uint64_t *offsets, int offset_len)
{
struct indexed_table_ref_iter empty = INDEXED_TABLE_REF_ITER_INIT;
struct indexed_table_ref_iter *itr =
reftable_calloc(sizeof(struct indexed_table_ref_iter));
struct indexed_table_ref_iter *itr = reftable_calloc(1, sizeof(*itr));
int err = 0;
*itr = empty;

View file

@ -19,24 +19,23 @@ license that can be found in the LICENSE file or at
static int merged_iter_init(struct merged_iter *mi)
{
int i = 0;
for (i = 0; i < mi->stack_len; i++) {
struct reftable_record rec = reftable_new_record(mi->typ);
int err = iterator_next(&mi->stack[i], &rec);
if (err < 0) {
return err;
}
for (size_t i = 0; i < mi->stack_len; i++) {
struct pq_entry e = {
.index = i,
};
int err;
reftable_record_init(&e.rec, mi->typ);
err = iterator_next(&mi->stack[i], &e.rec);
if (err < 0)
return err;
if (err > 0) {
reftable_iterator_destroy(&mi->stack[i]);
reftable_record_release(&rec);
} else {
struct pq_entry e = {
.rec = rec,
.index = i,
};
merged_iter_pqueue_add(&mi->pq, &e);
reftable_record_release(&e.rec);
continue;
}
merged_iter_pqueue_add(&mi->pq, &e);
}
return 0;
@ -45,11 +44,10 @@ static int merged_iter_init(struct merged_iter *mi)
static void merged_iter_close(void *p)
{
struct merged_iter *mi = p;
int i = 0;
merged_iter_pqueue_release(&mi->pq);
for (i = 0; i < mi->stack_len; i++) {
for (size_t i = 0; i < mi->stack_len; i++)
reftable_iterator_destroy(&mi->stack[i]);
}
reftable_free(mi->stack);
strbuf_release(&mi->key);
strbuf_release(&mi->entry_key);
@ -59,10 +57,12 @@ static int merged_iter_advance_nonnull_subiter(struct merged_iter *mi,
size_t idx)
{
struct pq_entry e = {
.rec = reftable_new_record(mi->typ),
.index = idx,
};
int err = iterator_next(&mi->stack[idx], &e.rec);
int err;
reftable_record_init(&e.rec, mi->typ);
err = iterator_next(&mi->stack[idx], &e.rec);
if (err < 0)
return err;
@ -168,14 +168,14 @@ static void iterator_from_merged_iter(struct reftable_iterator *it,
}
int reftable_new_merged_table(struct reftable_merged_table **dest,
struct reftable_table *stack, int n,
struct reftable_table *stack, size_t n,
uint32_t hash_id)
{
struct reftable_merged_table *m = NULL;
uint64_t last_max = 0;
uint64_t first_min = 0;
int i = 0;
for (i = 0; i < n; i++) {
for (size_t i = 0; i < n; i++) {
uint64_t min = reftable_table_min_update_index(&stack[i]);
uint64_t max = reftable_table_max_update_index(&stack[i]);
@ -190,7 +190,7 @@ int reftable_new_merged_table(struct reftable_merged_table **dest,
}
}
m = reftable_calloc(sizeof(struct reftable_merged_table));
REFTABLE_CALLOC_ARRAY(m, 1);
m->stack = stack;
m->stack_len = n;
m->min = first_min;
@ -239,50 +239,38 @@ static int merged_table_seek_record(struct reftable_merged_table *mt,
struct reftable_iterator *it,
struct reftable_record *rec)
{
struct reftable_iterator *iters = reftable_calloc(
sizeof(struct reftable_iterator) * mt->stack_len);
struct merged_iter merged = {
.stack = iters,
.typ = reftable_record_type(rec),
.hash_id = mt->hash_id,
.suppress_deletions = mt->suppress_deletions,
.key = STRBUF_INIT,
.entry_key = STRBUF_INIT,
};
int n = 0;
int err = 0;
int i = 0;
for (i = 0; i < mt->stack_len && err == 0; i++) {
int e = reftable_table_seek_record(&mt->stack[i], &iters[n],
rec);
if (e < 0) {
err = e;
}
if (e == 0) {
n++;
}
}
if (err < 0) {
int i = 0;
for (i = 0; i < n; i++) {
reftable_iterator_destroy(&iters[i]);
}
reftable_free(iters);
return err;
struct merged_iter *p;
int err;
REFTABLE_CALLOC_ARRAY(merged.stack, mt->stack_len);
for (size_t i = 0; i < mt->stack_len; i++) {
err = reftable_table_seek_record(&mt->stack[i],
&merged.stack[merged.stack_len], rec);
if (err < 0)
goto out;
if (!err)
merged.stack_len++;
}
merged.stack_len = n;
err = merged_iter_init(&merged);
if (err < 0) {
if (err < 0)
goto out;
p = reftable_malloc(sizeof(struct merged_iter));
*p = merged;
iterator_from_merged_iter(it, p);
out:
if (err < 0)
merged_iter_close(&merged);
return err;
} else {
struct merged_iter *p =
reftable_malloc(sizeof(struct merged_iter));
*p = merged;
iterator_from_merged_iter(it, p);
}
return 0;
return err;
}
int reftable_merged_table_seek_ref(struct reftable_merged_table *mt,

View file

@ -88,16 +88,17 @@ static struct reftable_merged_table *
merged_table_from_records(struct reftable_ref_record **refs,
struct reftable_block_source **source,
struct reftable_reader ***readers, int *sizes,
struct strbuf *buf, int n)
struct strbuf *buf, size_t n)
{
int i = 0;
struct reftable_merged_table *mt = NULL;
struct reftable_table *tabs;
int err;
struct reftable_table *tabs =
reftable_calloc(n * sizeof(struct reftable_table));
*readers = reftable_calloc(n * sizeof(struct reftable_reader *));
*source = reftable_calloc(n * sizeof(**source));
for (i = 0; i < n; i++) {
REFTABLE_CALLOC_ARRAY(tabs, n);
REFTABLE_CALLOC_ARRAY(*readers, n);
REFTABLE_CALLOC_ARRAY(*source, n);
for (size_t i = 0; i < n; i++) {
write_test_table(&buf[i], refs[i], sizes[i]);
block_source_from_strbuf(&(*source)[i], &buf[i]);
@ -231,14 +232,10 @@ static void test_merged(void)
while (len < 100) { /* cap loops/recursion. */
struct reftable_ref_record ref = { NULL };
int err = reftable_iterator_next_ref(&it, &ref);
if (err > 0) {
if (err > 0)
break;
}
if (len == cap) {
cap = 2 * cap + 1;
out = reftable_realloc(
out, sizeof(struct reftable_ref_record) * cap);
}
REFTABLE_ALLOC_GROW(out, len + 1, cap);
out[len++] = ref;
}
reftable_iterator_destroy(&it);
@ -265,16 +262,17 @@ static struct reftable_merged_table *
merged_table_from_log_records(struct reftable_log_record **logs,
struct reftable_block_source **source,
struct reftable_reader ***readers, int *sizes,
struct strbuf *buf, int n)
struct strbuf *buf, size_t n)
{
int i = 0;
struct reftable_merged_table *mt = NULL;
struct reftable_table *tabs;
int err;
struct reftable_table *tabs =
reftable_calloc(n * sizeof(struct reftable_table));
*readers = reftable_calloc(n * sizeof(struct reftable_reader *));
*source = reftable_calloc(n * sizeof(**source));
for (i = 0; i < n; i++) {
REFTABLE_CALLOC_ARRAY(tabs, n);
REFTABLE_CALLOC_ARRAY(*readers, n);
REFTABLE_CALLOC_ARRAY(*source, n);
for (size_t i = 0; i < n; i++) {
write_test_log_table(&buf[i], logs[i], sizes[i], i + 1);
block_source_from_strbuf(&(*source)[i], &buf[i]);
@ -368,14 +366,10 @@ static void test_merged_logs(void)
while (len < 100) { /* cap loops/recursion. */
struct reftable_log_record log = { NULL };
int err = reftable_iterator_next_log(&it, &log);
if (err > 0) {
if (err > 0)
break;
}
if (len == cap) {
cap = 2 * cap + 1;
out = reftable_realloc(
out, sizeof(struct reftable_log_record) * cap);
}
REFTABLE_ALLOC_GROW(out, len + 1, cap);
out[len++] = log;
}
reftable_iterator_destroy(&it);
@ -420,7 +414,7 @@ static void test_default_write_opts(void)
};
int err;
struct reftable_block_source source = { NULL };
struct reftable_table *tab = reftable_calloc(sizeof(*tab) * 1);
struct reftable_table *tab = reftable_calloc(1, sizeof(*tab));
uint32_t hash_id;
struct reftable_reader *rd = NULL;
struct reftable_merged_table *merged = NULL;

View file

@ -75,13 +75,9 @@ void merged_iter_pqueue_add(struct merged_iter_pqueue *pq, const struct pq_entry
{
int i = 0;
if (pq->len == pq->cap) {
pq->cap = 2 * pq->cap + 1;
pq->heap = reftable_realloc(pq->heap,
pq->cap * sizeof(struct pq_entry));
}
REFTABLE_ALLOC_GROW(pq->heap, pq->len + 1, pq->cap);
pq->heap[pq->len++] = *e;
i = pq->len - 1;
while (i > 0) {
int j = (i - 1) / 2;

View file

@ -37,8 +37,9 @@ void reftable_free(void *p)
free(p);
}
void *reftable_calloc(size_t sz)
void *reftable_calloc(size_t nelem, size_t elsize)
{
size_t sz = st_mult(nelem, elsize);
void *p = reftable_malloc(sz);
memset(p, 0, sz);
return p;

View file

@ -444,13 +444,13 @@ static int reader_start(struct reftable_reader *r, struct table_iter *ti,
static int reader_seek_linear(struct table_iter *ti,
struct reftable_record *want)
{
struct reftable_record rec =
reftable_new_record(reftable_record_type(want));
struct strbuf want_key = STRBUF_INIT;
struct strbuf got_key = STRBUF_INIT;
struct table_iter next = TABLE_ITER_INIT;
struct reftable_record rec;
int err = -1;
reftable_record_init(&rec, reftable_record_type(want));
reftable_record_key(want, &want_key);
while (1) {
@ -508,8 +508,38 @@ static int reader_seek_indexed(struct reftable_reader *r,
if (err < 0)
goto done;
/*
* The index may consist of multiple levels, where each level may have
* multiple index blocks. We start by doing a linear search in the
* highest layer that identifies the relevant index block as well as
* the record inside that block that corresponds to our wanted key.
*/
err = reader_seek_linear(&index_iter, &want_index);
if (err < 0)
goto done;
/*
* Traverse down the levels until we find a non-index entry.
*/
while (1) {
/*
* In case we seek a record that does not exist the index iter
* will tell us that the iterator is over. This works because
* the last index entry of the current level will contain the
* last key it knows about. So in case our seeked key is larger
* than the last indexed key we know that it won't exist.
*
* There is one subtlety in the layout of the index section
* that makes this work as expected: the highest-level index is
* at end of the section and will point backwards and thus we
* start reading from the end of the index section, not the
* beginning.
*
* If that wasn't the case and the order was reversed then the
* linear seek would seek into the lower levels and traverse
* all levels of the index only to find out that the key does
* not exist.
*/
err = table_iter_next(&index_iter, &index_result);
table_iter_block_done(&index_iter);
if (err != 0)
@ -539,8 +569,7 @@ static int reader_seek_indexed(struct reftable_reader *r,
if (err == 0) {
struct table_iter empty = TABLE_ITER_INIT;
struct table_iter *malloced =
reftable_calloc(sizeof(struct table_iter));
struct table_iter *malloced = reftable_calloc(1, sizeof(*malloced));
*malloced = empty;
table_iter_copy_from(malloced, &next);
iterator_from_table_iter(it, malloced);
@ -635,8 +664,7 @@ void reader_close(struct reftable_reader *r)
int reftable_new_reader(struct reftable_reader **p,
struct reftable_block_source *src, char const *name)
{
struct reftable_reader *rd =
reftable_calloc(sizeof(struct reftable_reader));
struct reftable_reader *rd = reftable_calloc(1, sizeof(*rd));
int err = init_reader(rd, src, name);
if (err == 0) {
*p = rd;
@ -711,7 +739,7 @@ static int reftable_reader_refs_for_unindexed(struct reftable_reader *r,
uint8_t *oid)
{
struct table_iter ti_empty = TABLE_ITER_INIT;
struct table_iter *ti = reftable_calloc(sizeof(struct table_iter));
struct table_iter *ti = reftable_calloc(1, sizeof(*ti));
struct filtering_ref_iterator *filter = NULL;
struct filtering_ref_iterator empty = FILTERING_REF_ITERATOR_INIT;
int oid_len = hash_size(r->hash_id);

View file

@ -56,7 +56,9 @@ static void write_table(char ***names, struct strbuf *buf, int N,
int i = 0, n;
struct reftable_log_record log = { NULL };
const struct reftable_stats *stats = NULL;
*names = reftable_calloc(sizeof(char *) * (N + 1));
REFTABLE_CALLOC_ARRAY(*names, N + 1);
reftable_writer_set_limits(w, update_index, update_index);
for (i = 0; i < N; i++) {
char name[100];
@ -188,7 +190,7 @@ static void test_log_overflow(void)
static void test_log_write_read(void)
{
int N = 2;
char **names = reftable_calloc(sizeof(char *) * (N + 1));
char **names = reftable_calloc(N + 1, sizeof(*names));
int err;
struct reftable_write_options opts = {
.block_size = 256,
@ -519,7 +521,7 @@ static void test_table_read_write_seek_index(void)
static void test_table_refs_for(int indexed)
{
int N = 50;
char **want_names = reftable_calloc(sizeof(char *) * (N + 1));
char **want_names = reftable_calloc(N + 1, sizeof(*want_names));
int want_names_len = 0;
uint8_t want_hash[GIT_SHA1_RAWSZ];
@ -866,6 +868,61 @@ static void test_write_multiple_indices(void)
strbuf_release(&buf);
}
static void test_write_multi_level_index(void)
{
struct reftable_write_options opts = {
.block_size = 100,
};
struct strbuf writer_buf = STRBUF_INIT, buf = STRBUF_INIT;
struct reftable_block_source source = { 0 };
struct reftable_iterator it = { 0 };
const struct reftable_stats *stats;
struct reftable_writer *writer;
struct reftable_reader *reader;
int err;
writer = reftable_new_writer(&strbuf_add_void, &noop_flush, &writer_buf, &opts);
reftable_writer_set_limits(writer, 1, 1);
for (size_t i = 0; i < 200; i++) {
struct reftable_ref_record ref = {
.update_index = 1,
.value_type = REFTABLE_REF_VAL1,
.value.val1 = {i},
};
strbuf_reset(&buf);
strbuf_addf(&buf, "refs/heads/%03" PRIuMAX, (uintmax_t)i);
ref.refname = buf.buf;
err = reftable_writer_add_ref(writer, &ref);
EXPECT_ERR(err);
}
reftable_writer_close(writer);
/*
* The written refs should be sufficiently large to result in a
* multi-level index.
*/
stats = reftable_writer_stats(writer);
EXPECT(stats->ref_stats.max_index_level == 2);
block_source_from_strbuf(&source, &writer_buf);
err = reftable_new_reader(&reader, &source, "filename");
EXPECT_ERR(err);
/*
* Seeking the last ref should work as expected.
*/
err = reftable_reader_seek_ref(reader, &it, "refs/heads/199");
EXPECT_ERR(err);
reftable_iterator_destroy(&it);
reftable_writer_free(writer);
reftable_reader_free(reader);
strbuf_release(&writer_buf);
strbuf_release(&buf);
}
static void test_corrupt_table_empty(void)
{
struct strbuf buf = STRBUF_INIT;
@ -916,5 +973,6 @@ int readwrite_test_main(int argc, const char *argv[])
RUN_TEST(test_write_object_id_length);
RUN_TEST(test_write_object_id_min_length);
RUN_TEST(test_write_multiple_indices);
RUN_TEST(test_write_multi_level_index);
return 0;
}

View file

@ -497,12 +497,13 @@ static void reftable_obj_record_copy_from(void *rec, const void *src_rec,
(const struct reftable_obj_record *)src_rec;
reftable_obj_record_release(obj);
obj->hash_prefix = reftable_malloc(src->hash_prefix_len);
REFTABLE_ALLOC_ARRAY(obj->hash_prefix, src->hash_prefix_len);
obj->hash_prefix_len = src->hash_prefix_len;
if (src->hash_prefix_len)
memcpy(obj->hash_prefix, src->hash_prefix, obj->hash_prefix_len);
obj->offsets = reftable_malloc(src->offset_len * sizeof(uint64_t));
REFTABLE_ALLOC_ARRAY(obj->offsets, src->offset_len);
obj->offset_len = src->offset_len;
COPY_ARRAY(obj->offsets, src->offsets, src->offset_len);
}
@ -559,7 +560,8 @@ static int reftable_obj_record_decode(void *rec, struct strbuf key,
int n = 0;
uint64_t last;
int j;
r->hash_prefix = reftable_malloc(key.len);
REFTABLE_ALLOC_ARRAY(r->hash_prefix, key.len);
memcpy(r->hash_prefix, key.buf, key.len);
r->hash_prefix_len = key.len;
@ -577,7 +579,7 @@ static int reftable_obj_record_decode(void *rec, struct strbuf key,
if (count == 0)
return start.len - in.len;
r->offsets = reftable_malloc(count * sizeof(uint64_t));
REFTABLE_ALLOC_ARRAY(r->offsets, count);
r->offset_len = count;
n = get_var_int(&r->offsets[0], &in);
@ -715,12 +717,12 @@ static void reftable_log_record_copy_from(void *rec, const void *src_rec,
}
if (dst->value.update.new_hash) {
dst->value.update.new_hash = reftable_malloc(hash_size);
REFTABLE_ALLOC_ARRAY(dst->value.update.new_hash, hash_size);
memcpy(dst->value.update.new_hash,
src->value.update.new_hash, hash_size);
}
if (dst->value.update.old_hash) {
dst->value.update.old_hash = reftable_malloc(hash_size);
REFTABLE_ALLOC_ARRAY(dst->value.update.old_hash, hash_size);
memcpy(dst->value.update.old_hash,
src->value.update.old_hash, hash_size);
}
@ -1257,45 +1259,22 @@ reftable_record_vtable(struct reftable_record *rec)
abort();
}
struct reftable_record reftable_new_record(uint8_t typ)
void reftable_record_init(struct reftable_record *rec, uint8_t typ)
{
struct reftable_record clean = {
.type = typ,
};
memset(rec, 0, sizeof(*rec));
rec->type = typ;
/* the following is involved, but the naive solution (just return
* `clean` as is, except for BLOCK_TYPE_INDEX), returns a garbage
* clean.u.obj.offsets pointer on Windows VS CI. Go figure.
*/
switch (typ) {
case BLOCK_TYPE_OBJ:
{
struct reftable_obj_record obj = { 0 };
clean.u.obj = obj;
break;
}
case BLOCK_TYPE_INDEX:
{
struct reftable_index_record idx = {
.last_key = STRBUF_INIT,
};
clean.u.idx = idx;
break;
}
case BLOCK_TYPE_REF:
{
struct reftable_ref_record ref = { 0 };
clean.u.ref = ref;
break;
}
case BLOCK_TYPE_LOG:
{
struct reftable_log_record log = { 0 };
clean.u.log = log;
break;
case BLOCK_TYPE_OBJ:
return;
case BLOCK_TYPE_INDEX:
strbuf_init(&rec->u.idx.last_key, 0);
return;
default:
BUG("unhandled record type");
}
}
return clean;
}
void reftable_record_print(struct reftable_record *rec, int hash_size)

View file

@ -69,9 +69,6 @@ struct reftable_record_vtable {
/* returns true for recognized block types. Block start with the block type. */
int reftable_is_block_type(uint8_t typ);
/* return an initialized record for the given type */
struct reftable_record reftable_new_record(uint8_t typ);
/* Encode `key` into `dest`. Sets `is_restart` to indicate a restart. Returns
* number of bytes written. */
int reftable_encode_key(int *is_restart, struct string_view dest,
@ -100,8 +97,8 @@ struct reftable_obj_record {
/* record is a generic wrapper for different types of records. It is normally
* created on the stack, or embedded within another struct. If the type is
* known, a fresh instance can be initialized explicitly. Otherwise, use
* reftable_new_record() to initialize generically (as the index_record is not
* valid as 0-initialized structure)
* `reftable_record_init()` to initialize generically (as the index_record is
* not valid as 0-initialized structure)
*/
struct reftable_record {
uint8_t type;
@ -113,6 +110,9 @@ struct reftable_record {
} u;
};
/* Initialize the reftable record for the given type */
void reftable_record_init(struct reftable_record *rec, uint8_t typ);
/* see struct record_vtable */
int reftable_record_equal(struct reftable_record *a, struct reftable_record *b, int hash_size);
void reftable_record_print(struct reftable_record *rec, int hash_size);
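
The API change above replaces the by-value constructor with an in-place
initializer. A minimal caller under the new interface might look like the
sketch below; it assumes the declarations from reftable/record.h and that
`reftable_record_release()` is the matching cleanup helper (not shown in
this hunk).

/* Sketch of the new init/release pairing; assumes reftable/record.h. */
static void index_record_example(void)
{
	struct reftable_record rec;

	/* An index record is not valid all-zero: its strbuf member needs init. */
	reftable_record_init(&rec, BLOCK_TYPE_INDEX);

	/* ... fill in and use rec.u.idx ... */

	reftable_record_release(&rec);	/* assumed cleanup counterpart */
}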

View file

@ -16,11 +16,11 @@
static void test_copy(struct reftable_record *rec)
{
struct reftable_record copy = { 0 };
struct reftable_record copy;
uint8_t typ;
typ = reftable_record_type(rec);
copy = reftable_new_record(typ);
reftable_record_init(&copy, typ);
reftable_record_copy_from(&copy, rec, GIT_SHA1_RAWSZ);
/* do it twice to catch memory leaks */
reftable_record_copy_from(&copy, rec, GIT_SHA1_RAWSZ);
@ -231,8 +231,8 @@ static void test_reftable_log_record_roundtrip(void)
.value_type = REFTABLE_LOG_UPDATE,
.value = {
.update = {
.new_hash = reftable_calloc(GIT_SHA1_RAWSZ),
.old_hash = reftable_calloc(GIT_SHA1_RAWSZ),
.new_hash = reftable_calloc(GIT_SHA1_RAWSZ, 1),
.old_hash = reftable_calloc(GIT_SHA1_RAWSZ, 1),
.name = xstrdup("old name"),
.email = xstrdup("old@email"),
.message = xstrdup("old message"),

View file

@ -140,8 +140,8 @@ int validate_ref_record_addition(struct reftable_table tab,
{
struct modification mod = {
.tab = tab,
.add = reftable_calloc(sizeof(char *) * sz),
.del = reftable_calloc(sizeof(char *) * sz),
.add = reftable_calloc(sz, sizeof(*mod.add)),
.del = reftable_calloc(sz, sizeof(*mod.del)),
};
int i = 0;
int err = 0;
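
These call sites switch from `reftable_calloc(nmemb * size)` to a
calloc(3)-style `reftable_calloc(nmemb, size)`. The two-argument form lets
the allocator check the multiplication for overflow in one place; the
wrapper below is only a sketch of that idea, not the actual reftable
implementation.

/* Illustrative overflow-checked calloc-style wrapper. */
#include <stdint.h>
#include <stdlib.h>

static void *calloc_checked(size_t nmemb, size_t size)
{
	if (size && nmemb > SIZE_MAX / size)
		return NULL;		/* nmemb * size would overflow */
	return calloc(nmemb, size);	/* calloc() also zeroes the block */
}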

View file

@ -33,7 +33,7 @@ struct reftable_table;
the stack array.
*/
int reftable_new_merged_table(struct reftable_merged_table **dest,
struct reftable_table *stack, int n,
struct reftable_table *stack, size_t n,
uint32_t hash_id);
/* returns an iterator positioned just before 'name' */

View file

@ -24,7 +24,8 @@ static int stack_try_add(struct reftable_stack *st,
void *arg),
void *arg);
static int stack_write_compact(struct reftable_stack *st,
struct reftable_writer *wr, int first, int last,
struct reftable_writer *wr,
size_t first, size_t last,
struct reftable_log_expiry_config *config);
static int stack_check_addition(struct reftable_stack *st,
const char *new_tab_name);
@ -57,8 +58,7 @@ static int reftable_fd_flush(void *arg)
int reftable_new_stack(struct reftable_stack **dest, const char *dir,
struct reftable_write_options config)
{
struct reftable_stack *p =
reftable_calloc(sizeof(struct reftable_stack));
struct reftable_stack *p = reftable_calloc(1, sizeof(*p));
struct strbuf list_file_name = STRBUF_INIT;
int err = 0;
@ -101,7 +101,7 @@ static int fd_read_lines(int fd, char ***namesp)
goto done;
}
buf = reftable_malloc(size + 1);
REFTABLE_ALLOC_ARRAY(buf, size + 1);
if (read_in_full(fd, buf, size) != size) {
err = REFTABLE_IO_ERROR;
goto done;
@ -121,7 +121,7 @@ int read_lines(const char *filename, char ***namesp)
int err = 0;
if (fd < 0) {
if (errno == ENOENT) {
*namesp = reftable_calloc(sizeof(char *));
REFTABLE_CALLOC_ARRAY(*namesp, 1);
return 0;
}
@ -198,8 +198,7 @@ void reftable_stack_destroy(struct reftable_stack *st)
static struct reftable_reader **stack_copy_readers(struct reftable_stack *st,
int cur_len)
{
struct reftable_reader **cur =
reftable_calloc(sizeof(struct reftable_reader *) * cur_len);
struct reftable_reader **cur = reftable_calloc(cur_len, sizeof(*cur));
int i = 0;
for (i = 0; i < cur_len; i++) {
cur[i] = st->readers[i];
@ -210,18 +209,18 @@ static struct reftable_reader **stack_copy_readers(struct reftable_stack *st,
static int reftable_stack_reload_once(struct reftable_stack *st, char **names,
int reuse_open)
{
int cur_len = !st->merged ? 0 : st->merged->stack_len;
size_t cur_len = !st->merged ? 0 : st->merged->stack_len;
struct reftable_reader **cur = stack_copy_readers(st, cur_len);
int err = 0;
int names_len = names_length(names);
size_t names_len = names_length(names);
struct reftable_reader **new_readers =
reftable_calloc(sizeof(struct reftable_reader *) * names_len);
reftable_calloc(names_len, sizeof(*new_readers));
struct reftable_table *new_tables =
reftable_calloc(sizeof(struct reftable_table) * names_len);
int new_readers_len = 0;
reftable_calloc(names_len, sizeof(*new_tables));
size_t new_readers_len = 0;
struct reftable_merged_table *new_merged = NULL;
struct strbuf table_path = STRBUF_INIT;
int i;
int err = 0;
size_t i;
while (*names) {
struct reftable_reader *rd = NULL;
@ -229,11 +228,10 @@ static int reftable_stack_reload_once(struct reftable_stack *st, char **names,
/* this is linear; we assume compaction keeps the number of
tables under control so this is not quadratic. */
int j = 0;
for (j = 0; reuse_open && j < cur_len; j++) {
if (cur[j] && 0 == strcmp(cur[j]->name, name)) {
rd = cur[j];
cur[j] = NULL;
for (i = 0; reuse_open && i < cur_len; i++) {
if (cur[i] && 0 == strcmp(cur[i]->name, name)) {
rd = cur[i];
cur[i] = NULL;
break;
}
}
@ -351,7 +349,7 @@ static int reftable_stack_reload_maybe_reuse(struct reftable_stack *st,
goto out;
}
names = reftable_calloc(sizeof(char *));
REFTABLE_CALLOC_ARRAY(names, 1);
} else {
err = fd_read_lines(fd, &names);
if (err < 0)
@ -558,7 +556,7 @@ struct reftable_addition {
struct reftable_stack *stack;
char **new_tables;
int new_tables_len;
size_t new_tables_len, new_tables_cap;
uint64_t next_update_index;
};
@ -609,8 +607,9 @@ static int reftable_stack_init_addition(struct reftable_addition *add,
static void reftable_addition_close(struct reftable_addition *add)
{
int i = 0;
struct strbuf nm = STRBUF_INIT;
size_t i;
for (i = 0; i < add->new_tables_len; i++) {
stack_filename(&nm, add->stack, add->new_tables[i]);
unlink(nm.buf);
@ -620,6 +619,7 @@ static void reftable_addition_close(struct reftable_addition *add)
reftable_free(add->new_tables);
add->new_tables = NULL;
add->new_tables_len = 0;
add->new_tables_cap = 0;
delete_tempfile(&add->lock_file);
strbuf_release(&nm);
@ -638,8 +638,8 @@ int reftable_addition_commit(struct reftable_addition *add)
{
struct strbuf table_list = STRBUF_INIT;
int lock_file_fd = get_tempfile_fd(add->lock_file);
int i = 0;
int err = 0;
size_t i;
if (add->new_tables_len == 0)
goto done;
@ -670,12 +670,12 @@ int reftable_addition_commit(struct reftable_addition *add)
}
/* success, no more state to clean up. */
for (i = 0; i < add->new_tables_len; i++) {
for (i = 0; i < add->new_tables_len; i++)
reftable_free(add->new_tables[i]);
}
reftable_free(add->new_tables);
add->new_tables = NULL;
add->new_tables_len = 0;
add->new_tables_cap = 0;
err = reftable_stack_reload_maybe_reuse(add->stack, 1);
if (err)
@ -694,7 +694,7 @@ int reftable_stack_new_addition(struct reftable_addition **dest,
{
int err = 0;
struct reftable_addition empty = REFTABLE_ADDITION_INIT;
*dest = reftable_calloc(sizeof(**dest));
REFTABLE_CALLOC_ARRAY(*dest, 1);
**dest = empty;
err = reftable_stack_init_addition(*dest, st);
if (err) {
@ -802,11 +802,9 @@ int reftable_addition_add(struct reftable_addition *add,
goto done;
}
add->new_tables = reftable_realloc(add->new_tables,
sizeof(*add->new_tables) *
(add->new_tables_len + 1));
add->new_tables[add->new_tables_len] = strbuf_detach(&next_name, NULL);
add->new_tables_len++;
REFTABLE_ALLOC_GROW(add->new_tables, add->new_tables_len + 1,
add->new_tables_cap);
add->new_tables[add->new_tables_len++] = strbuf_detach(&next_name, NULL);
done:
if (tab_fd > 0) {
close(tab_fd);
@ -832,7 +830,8 @@ uint64_t reftable_stack_next_update_index(struct reftable_stack *st)
return 1;
}
static int stack_compact_locked(struct reftable_stack *st, int first, int last,
static int stack_compact_locked(struct reftable_stack *st,
size_t first, size_t last,
struct strbuf *temp_tab,
struct reftable_log_expiry_config *config)
{
@ -882,22 +881,21 @@ static int stack_compact_locked(struct reftable_stack *st, int first, int last,
}
static int stack_write_compact(struct reftable_stack *st,
struct reftable_writer *wr, int first, int last,
struct reftable_writer *wr,
size_t first, size_t last,
struct reftable_log_expiry_config *config)
{
int subtabs_len = last - first + 1;
size_t subtabs_len = last - first + 1;
struct reftable_table *subtabs = reftable_calloc(
sizeof(struct reftable_table) * (last - first + 1));
last - first + 1, sizeof(*subtabs));
struct reftable_merged_table *mt = NULL;
int err = 0;
struct reftable_iterator it = { NULL };
struct reftable_ref_record ref = { NULL };
struct reftable_log_record log = { NULL };
uint64_t entries = 0;
int err = 0;
int i = 0, j = 0;
for (i = first, j = 0; i <= last; i++) {
for (size_t i = first, j = 0; i <= last; i++) {
struct reftable_reader *t = st->readers[i];
reftable_table_from_reader(&subtabs[j++], t);
st->stats.bytes += t->size;
@ -981,25 +979,20 @@ static int stack_write_compact(struct reftable_stack *st,
}
/* < 0: error. 0 == OK, > 0 attempt failed; could retry. */
static int stack_compact_range(struct reftable_stack *st, int first, int last,
static int stack_compact_range(struct reftable_stack *st,
size_t first, size_t last,
struct reftable_log_expiry_config *expiry)
{
char **delete_on_success = NULL, **subtable_locks = NULL, **listp = NULL;
struct strbuf temp_tab_file_name = STRBUF_INIT;
struct strbuf new_table_name = STRBUF_INIT;
struct strbuf lock_file_name = STRBUF_INIT;
struct strbuf ref_list_contents = STRBUF_INIT;
struct strbuf new_table_path = STRBUF_INIT;
size_t i, j, compact_count;
int err = 0;
int have_lock = 0;
int lock_file_fd = -1;
int compact_count = last - first + 1;
char **listp = NULL;
char **delete_on_success =
reftable_calloc(sizeof(char *) * (compact_count + 1));
char **subtable_locks =
reftable_calloc(sizeof(char *) * (compact_count + 1));
int i = 0;
int j = 0;
int is_empty_table = 0;
if (first > last || (!expiry && first == last)) {
@ -1007,6 +1000,10 @@ static int stack_compact_range(struct reftable_stack *st, int first, int last,
goto done;
}
compact_count = last - first + 1;
REFTABLE_CALLOC_ARRAY(delete_on_success, compact_count + 1);
REFTABLE_CALLOC_ARRAY(subtable_locks, compact_count + 1);
st->stats.attempts++;
strbuf_reset(&lock_file_name);
@ -1172,12 +1169,14 @@ static int stack_compact_range(struct reftable_stack *st, int first, int last,
done:
free_names(delete_on_success);
listp = subtable_locks;
while (*listp) {
unlink(*listp);
listp++;
if (subtable_locks) {
listp = subtable_locks;
while (*listp) {
unlink(*listp);
listp++;
}
free_names(subtable_locks);
}
free_names(subtable_locks);
if (lock_file_fd >= 0) {
close(lock_file_fd);
lock_file_fd = -1;
@ -1196,17 +1195,17 @@ static int stack_compact_range(struct reftable_stack *st, int first, int last,
int reftable_stack_compact_all(struct reftable_stack *st,
struct reftable_log_expiry_config *config)
{
return stack_compact_range(st, 0, st->merged->stack_len - 1, config);
return stack_compact_range(st, 0, st->merged->stack_len ?
st->merged->stack_len - 1 : 0, config);
}
static int stack_compact_range_stats(struct reftable_stack *st, int first,
int last,
static int stack_compact_range_stats(struct reftable_stack *st,
size_t first, size_t last,
struct reftable_log_expiry_config *config)
{
int err = stack_compact_range(st, first, last, config);
if (err > 0) {
if (err > 0)
st->stats.failures++;
}
return err;
}
@ -1226,12 +1225,11 @@ int fastlog2(uint64_t sz)
return l - 1;
}
struct segment *sizes_to_segments(int *seglen, uint64_t *sizes, int n)
struct segment *sizes_to_segments(size_t *seglen, uint64_t *sizes, size_t n)
{
struct segment *segs = reftable_calloc(sizeof(struct segment) * n);
int next = 0;
struct segment *segs = reftable_calloc(n, sizeof(*segs));
struct segment cur = { 0 };
int i = 0;
size_t next = 0, i;
if (n == 0) {
*seglen = 0;
@ -1257,29 +1255,27 @@ struct segment *sizes_to_segments(int *seglen, uint64_t *sizes, int n)
return segs;
}
struct segment suggest_compaction_segment(uint64_t *sizes, int n)
struct segment suggest_compaction_segment(uint64_t *sizes, size_t n)
{
int seglen = 0;
struct segment *segs = sizes_to_segments(&seglen, sizes, n);
struct segment min_seg = {
.log = 64,
};
int i = 0;
for (i = 0; i < seglen; i++) {
if (segment_size(&segs[i]) == 1) {
continue;
}
struct segment *segs;
size_t seglen = 0, i;
if (segs[i].log < min_seg.log) {
segs = sizes_to_segments(&seglen, sizes, n);
for (i = 0; i < seglen; i++) {
if (segment_size(&segs[i]) == 1)
continue;
if (segs[i].log < min_seg.log)
min_seg = segs[i];
}
}
while (min_seg.start > 0) {
int prev = min_seg.start - 1;
if (fastlog2(min_seg.bytes) < fastlog2(sizes[prev])) {
size_t prev = min_seg.start - 1;
if (fastlog2(min_seg.bytes) < fastlog2(sizes[prev]))
break;
}
min_seg.start = prev;
min_seg.bytes += sizes[prev];
@ -1292,7 +1288,7 @@ struct segment suggest_compaction_segment(uint64_t *sizes, int n)
static uint64_t *stack_table_sizes_for_compaction(struct reftable_stack *st)
{
uint64_t *sizes =
reftable_calloc(sizeof(uint64_t) * st->merged->stack_len);
reftable_calloc(st->merged->stack_len, sizeof(*sizes));
int version = (st->config.hash_id == GIT_SHA1_FORMAT_ID) ? 1 : 2;
int overhead = header_size(version) - 1;
int i = 0;
@ -1391,17 +1387,12 @@ static int stack_check_addition(struct reftable_stack *st,
while (1) {
struct reftable_ref_record ref = { NULL };
err = reftable_iterator_next_ref(&it, &ref);
if (err > 0) {
if (err > 0)
break;
}
if (err < 0)
goto done;
if (len >= cap) {
cap = 2 * cap + 1;
refs = reftable_realloc(refs, cap * sizeof(refs[0]));
}
REFTABLE_ALLOC_GROW(refs, len + 1, cap);
refs[len++] = ref;
}
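
REFTABLE_ALLOC_GROW() replaces the hand-rolled "cap = 2 * cap + 1;
reftable_realloc(...)" growth code that the old version of this file used.
Its definition is not part of this diff; the macro below is a sketch of the
usual amortized-doubling helper such a macro stands in for.

/* Sketch of an ALLOC_GROW-style helper: grow with amortized doubling when
 * nr would exceed cap. Illustrative only, not the real REFTABLE_ALLOC_GROW. */
#include <stdlib.h>

#define GROW_ARRAY_SKETCH(x, nr, cap) \
	do { \
		if ((nr) > (cap)) { \
			(cap) = 2 * (cap) + 1; \
			if ((cap) < (nr)) \
				(cap) = (nr); \
			(x) = realloc((x), (cap) * sizeof(*(x))); \
		} \
	} while (0)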

View file

@ -32,13 +32,13 @@ struct reftable_stack {
int read_lines(const char *filename, char ***lines);
struct segment {
int start, end;
size_t start, end;
int log;
uint64_t bytes;
};
int fastlog2(uint64_t sz);
struct segment *sizes_to_segments(int *seglen, uint64_t *sizes, int n);
struct segment suggest_compaction_segment(uint64_t *sizes, int n);
struct segment *sizes_to_segments(size_t *seglen, uint64_t *sizes, size_t n);
struct segment suggest_compaction_segment(uint64_t *sizes, size_t n);
#endif

View file

@ -732,7 +732,7 @@ static void test_sizes_to_segments(void)
uint64_t sizes[] = { 2, 3, 4, 5, 7, 9 };
/* .................0 1 2 3 4 5 */
int seglen = 0;
size_t seglen = 0;
struct segment *segs =
sizes_to_segments(&seglen, sizes, ARRAY_SIZE(sizes));
EXPECT(segs[2].log == 3);
@ -747,7 +747,7 @@ static void test_sizes_to_segments(void)
static void test_sizes_to_segments_empty(void)
{
int seglen = 0;
size_t seglen = 0;
struct segment *segs = sizes_to_segments(&seglen, NULL, 0);
EXPECT(seglen == 0);
reftable_free(segs);
@ -756,8 +756,7 @@ static void test_sizes_to_segments_empty(void)
static void test_sizes_to_segments_all_equal(void)
{
uint64_t sizes[] = { 5, 5 };
int seglen = 0;
size_t seglen = 0;
struct segment *segs =
sizes_to_segments(&seglen, sizes, ARRAY_SIZE(sizes));
EXPECT(seglen == 1);

View file

@ -20,8 +20,8 @@ struct tree_node *tree_search(void *key, struct tree_node **rootp,
if (!insert) {
return NULL;
} else {
struct tree_node *n =
reftable_calloc(sizeof(struct tree_node));
struct tree_node *n;
REFTABLE_CALLOC_ARRAY(n, 1);
n->key = key;
*rootp = n;
return *rootp;

View file

@ -49,7 +49,7 @@ static int padded_write(struct reftable_writer *w, uint8_t *data, size_t len,
{
int n = 0;
if (w->pending_padding > 0) {
uint8_t *zeroed = reftable_calloc(w->pending_padding);
uint8_t *zeroed = reftable_calloc(w->pending_padding, sizeof(*zeroed));
int n = w->write(w->write_arg, zeroed, w->pending_padding);
if (n < 0)
return n;
@ -124,8 +124,7 @@ reftable_new_writer(ssize_t (*writer_func)(void *, const void *, size_t),
int (*flush_func)(void *),
void *writer_arg, struct reftable_write_options *opts)
{
struct reftable_writer *wp =
reftable_calloc(sizeof(struct reftable_writer));
struct reftable_writer *wp = reftable_calloc(1, sizeof(*wp));
strbuf_init(&wp->block_writer_data.last_key, 0);
options_set_defaults(opts);
if (opts->block_size >= (1 << 24)) {
@ -133,7 +132,7 @@ reftable_new_writer(ssize_t (*writer_func)(void *, const void *, size_t),
abort();
}
wp->last_key = reftable_empty_strbuf;
wp->block = reftable_calloc(opts->block_size);
REFTABLE_CALLOC_ARRAY(wp->block, opts->block_size);
wp->write = writer_func;
wp->write_arg = writer_arg;
wp->opts = *opts;
@ -202,12 +201,7 @@ static void writer_index_hash(struct reftable_writer *w, struct strbuf *hash)
return;
}
if (key->offset_len == key->offset_cap) {
key->offset_cap = 2 * key->offset_cap + 1;
key->offsets = reftable_realloc(
key->offsets, sizeof(uint64_t) * key->offset_cap);
}
REFTABLE_ALLOC_GROW(key->offsets, key->offset_len + 1, key->offset_cap);
key->offsets[key->offset_len++] = off;
}
@ -379,20 +373,39 @@ int reftable_writer_add_logs(struct reftable_writer *w,
static int writer_finish_section(struct reftable_writer *w)
{
struct reftable_block_stats *bstats = NULL;
uint8_t typ = block_writer_type(w->block_writer);
uint64_t index_start = 0;
int max_level = 0;
int threshold = w->opts.unpadded ? 1 : 3;
size_t threshold = w->opts.unpadded ? 1 : 3;
int before_blocks = w->stats.idx_stats.blocks;
int err = writer_flush_block(w);
int i = 0;
struct reftable_block_stats *bstats = NULL;
int err;
err = writer_flush_block(w);
if (err < 0)
return err;
/*
* When the section we are about to index has a lot of blocks then the
* index itself may span across multiple blocks, as well. This would
* require a linear scan over index blocks only to find the desired
* indexed block, which is inefficient. Instead, we write a multi-level
* index where index records of level N+1 will refer to index blocks of
* level N. This isn't constant time, either, but at least logarithmic.
*
* This loop handles writing this multi-level index. Note that we write
* the lowest-level index pointing to the indexed blocks first. We then
* continue writing additional index levels until the current level has
* fewer blocks than the threshold so that the highest level will be at
* the end of the index section.
*
* Readers are thus required to start reading the index section from
* its end, which is why we set `index_start` to the beginning of the
* last index section.
*/
while (w->index_len > threshold) {
struct reftable_index_record *idx = NULL;
int idx_len = 0;
size_t i, idx_len;
max_level++;
index_start = w->next;
@ -411,33 +424,26 @@ static int writer_finish_section(struct reftable_writer *w)
.idx = idx[i],
},
};
if (block_writer_add(w->block_writer, &rec) == 0) {
continue;
}
err = writer_flush_block(w);
err = writer_add_record(w, &rec);
if (err < 0)
return err;
writer_reinit_block_writer(w, BLOCK_TYPE_INDEX);
err = block_writer_add(w->block_writer, &rec);
if (err != 0) {
/* write into fresh block should always succeed
*/
abort();
}
}
for (i = 0; i < idx_len; i++) {
err = writer_flush_block(w);
if (err < 0)
return err;
for (i = 0; i < idx_len; i++)
strbuf_release(&idx[i].last_key);
}
reftable_free(idx);
}
err = writer_flush_block(w);
if (err < 0)
return err;
/*
* The index may still contain a number of index blocks lower than the
* threshold. Clear it so that these entries don't leak into the next
* index section.
*/
writer_clear_index(w);
bstats = writer_reftable_block_stats(w, typ);
@ -630,11 +636,8 @@ int reftable_writer_close(struct reftable_writer *w)
static void writer_clear_index(struct reftable_writer *w)
{
int i = 0;
for (i = 0; i < w->index_len; i++) {
for (size_t i = 0; i < w->index_len; i++)
strbuf_release(&w->index[i].last_key);
}
FREE_AND_NULL(w->index);
w->index_len = 0;
w->index_cap = 0;
@ -682,12 +685,7 @@ static int writer_flush_nonempty_block(struct reftable_writer *w)
if (err < 0)
return err;
if (w->index_cap == w->index_len) {
w->index_cap = 2 * w->index_cap + 1;
w->index = reftable_realloc(
w->index,
sizeof(struct reftable_index_record) * w->index_cap);
}
REFTABLE_ALLOC_GROW(w->index, w->index_len + 1, w->index_cap);
ir.offset = w->next;
strbuf_reset(&ir.last_key);
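
A quick way to sanity-check the multi-level index comment added above: each
additional level shrinks the number of blocks to index by roughly the
per-block fanout, so the loop terminates after a logarithmic number of
levels. The helper below is only a back-of-the-envelope illustration, not
code from this patch; "fanout" is a made-up parameter standing in for how
many index records fit into one index block.

/* Count the index levels written before a level fits under the threshold. */
static int index_levels(unsigned long blocks, unsigned long fanout,
			unsigned long threshold)
{
	int levels = 0;

	while (blocks > threshold) {
		/* each level indexes the blocks of the level below it */
		blocks = (blocks + fanout - 1) / fanout;
		levels++;
	}
	return levels;	/* e.g. 10000 blocks, fanout 100, threshold 3 -> 2 levels */
}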

View file

@ -43,6 +43,7 @@ void prepare_repo_settings(struct repository *r)
if (experimental) {
r->settings.fetch_negotiation_algorithm = FETCH_NEGOTIATION_SKIPPING;
r->settings.pack_use_bitmap_boundary_traversal = 1;
r->settings.pack_use_multi_pack_reuse = 1;
}
if (manyfiles) {
r->settings.index_version = 4;
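
For context, pack_use_multi_pack_reuse is the in-memory switch that
pack-objects can consult once the repository settings have been prepared;
tying it to feature.experimental is what the hunk above does. A rough
caller-side sketch, assuming repository.h and the declarations shown in
this diff:

/* Sketch only: consult the setting after preparing repo settings. */
static int want_multi_pack_reuse(struct repository *r)
{
	prepare_repo_settings(r);	/* honors feature.experimental */
	return r->settings.pack_use_multi_pack_reuse;
}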

View file

@ -39,6 +39,7 @@ struct repo_settings {
int sparse_index;
int pack_read_reverse_index;
int pack_use_bitmap_boundary_traversal;
int pack_use_multi_pack_reuse;
/*
* Does this repository have core.useReplaceRefs=true (on by

View file

@ -973,6 +973,9 @@ static int handle_cache(struct index_state *istate,
mmfile[i].ptr = repo_read_object_file(the_repository,
&ce->oid, &type,
&size);
if (!mmfile[i].ptr)
die(_("unable to read %s"),
oid_to_hex(&ce->oid));
mmfile[i].size = size;
}
}

View file

@ -3641,6 +3641,7 @@ static int do_exec(struct repository *r, const char *command_line)
fprintf(stderr, _("Executing: %s\n"), command_line);
cmd.use_shell = 1;
strvec_push(&cmd.args, command_line);
strvec_push(&cmd.env, "GIT_CHERRY_PICK_HELP");
status = run_command(&cmd);
/* force re-reading of the cache */
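
The strvec_push(&cmd.env, "GIT_CHERRY_PICK_HELP") call relies on the
child_process convention that an environment entry without an '=' asks for
that variable to be removed from the child's environment rather than set.
A minimal sketch of the same pattern, assuming run-command.h and strvec.h
from this tree:

/* Run a shell command with GIT_CHERRY_PICK_HELP scrubbed from its env. */
static int run_without_cherry_pick_help(const char *cmdline)
{
	struct child_process cmd = CHILD_PROCESS_INIT;

	cmd.use_shell = 1;
	strvec_push(&cmd.args, cmdline);
	strvec_push(&cmd.env, "GIT_CHERRY_PICK_HELP");	/* unset for the child */
	return run_command(&cmd);
}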

View file

@ -524,51 +524,51 @@ test_expect_success 'given old value for missing pseudoref, do not create' '
test_expect_success 'create pseudoref' '
git update-ref PSEUDOREF $A &&
test $A = $(git rev-parse PSEUDOREF)
test $A = $(git show-ref -s --verify PSEUDOREF)
'
test_expect_success 'overwrite pseudoref with no old value given' '
git update-ref PSEUDOREF $B &&
test $B = $(git rev-parse PSEUDOREF)
test $B = $(git show-ref -s --verify PSEUDOREF)
'
test_expect_success 'overwrite pseudoref with correct old value' '
git update-ref PSEUDOREF $C $B &&
test $C = $(git rev-parse PSEUDOREF)
test $C = $(git show-ref -s --verify PSEUDOREF)
'
test_expect_success 'do not overwrite pseudoref with wrong old value' '
test_must_fail git update-ref PSEUDOREF $D $E 2>err &&
test $C = $(git rev-parse PSEUDOREF) &&
test $C = $(git show-ref -s --verify PSEUDOREF) &&
test_grep "cannot lock ref.*expected" err
'
test_expect_success 'delete pseudoref' '
git update-ref -d PSEUDOREF &&
test_must_fail git rev-parse PSEUDOREF
test_must_fail git show-ref -s --verify PSEUDOREF
'
test_expect_success 'do not delete pseudoref with wrong old value' '
git update-ref PSEUDOREF $A &&
test_must_fail git update-ref -d PSEUDOREF $B 2>err &&
test $A = $(git rev-parse PSEUDOREF) &&
test $A = $(git show-ref -s --verify PSEUDOREF) &&
test_grep "cannot lock ref.*expected" err
'
test_expect_success 'delete pseudoref with correct old value' '
git update-ref -d PSEUDOREF $A &&
test_must_fail git rev-parse PSEUDOREF
test_must_fail git show-ref -s --verify PSEUDOREF
'
test_expect_success 'create pseudoref with old OID zero' '
git update-ref PSEUDOREF $A $Z &&
test $A = $(git rev-parse PSEUDOREF)
test $A = $(git show-ref -s --verify PSEUDOREF)
'
test_expect_success 'do not overwrite pseudoref with old OID zero' '
test_when_finished git update-ref -d PSEUDOREF &&
test_must_fail git update-ref PSEUDOREF $B $Z 2>err &&
test $A = $(git rev-parse PSEUDOREF) &&
test $A = $(git show-ref -s --verify PSEUDOREF) &&
test_grep "already exists" err
'

View file

@ -174,6 +174,14 @@ test_expect_success 'show-ref --verify HEAD' '
test_must_be_empty actual
'
test_expect_success 'show-ref --verify pseudorefs' '
git update-ref CHERRY_PICK_HEAD HEAD $ZERO_OID &&
test_when_finished "git update-ref -d CHERRY_PICK_HEAD" &&
git show-ref -s --verify HEAD >actual &&
git show-ref -s --verify CHERRY_PICK_HEAD >expect &&
test_cmp actual expect
'
test_expect_success 'show-ref --verify with dangling ref' '
sha1_file() {
echo "$*" | sed "s#..#.git/objects/&/#"

View file

@ -153,6 +153,18 @@ test_expect_success 'rebase -i with the exec command checks tree cleanness' '
git rebase --continue
'
test_expect_success 'cherry-pick works with rebase --exec' '
test_when_finished "git cherry-pick --abort; \
git rebase --abort; \
git checkout primary" &&
echo "exec git cherry-pick G" >todo &&
(
set_replace_editor todo &&
test_must_fail git rebase -i D D
) &&
test_cmp_rev G CHERRY_PICK_HEAD
'
test_expect_success 'rebase -x with empty command fails' '
test_when_finished "git rebase --abort ||:" &&
test_must_fail env git rebase -x "" @ 2>actual &&

View file

@ -40,9 +40,24 @@ testrebase() {
test_path_is_missing "$state_dir"
'
test_expect_success "pre rebase$type head is marked as reachable" '
# Clean up the state from the previous one
git checkout -f --detach pre-rebase &&
test_tick &&
git commit --amend --only -m "reworded" &&
orig_head=$(git rev-parse HEAD) &&
test_must_fail git rebase$type main &&
# Stop ORIG_HEAD marking $state_dir/orig-head as reachable
git update-ref -d ORIG_HEAD &&
git reflog expire --expire="$GIT_COMMITTER_DATE" --all &&
git prune --expire=now &&
git rebase --abort &&
test_cmp_rev $orig_head HEAD
'
test_expect_success "rebase$type --abort after --skip" '
# Clean up the state from the previous one
git reset --hard pre-rebase &&
git checkout -B to-rebase pre-rebase &&
test_must_fail git rebase$type main &&
test_path_is_dir "$state_dir" &&
test_must_fail git rebase --skip &&

View file

@ -333,4 +333,14 @@ test_expect_success 'never change active branch' '
test_cmp_rev not-the-feature-branch unrelated-onto-branch
'
test_expect_success 'autostash commit is marked as reachable' '
echo changed >file0 &&
git rebase --autostash --exec "git prune --expire=now" \
feature-branch^ feature-branch &&
# git rebase succeeds if the stash cannot be applied so we need to check
# the contents of file0
echo changed >expect &&
test_cmp expect file0
'
test_done

View file

@ -1516,4 +1516,56 @@ test_expect_success 'restore untracked files even when we hit conflicts' '
)
'
test_expect_success 'stash create reports a locked index' '
test_when_finished "rm -rf repo" &&
git init repo &&
(
cd repo &&
test_commit A A.file &&
echo change >A.file &&
touch .git/index.lock &&
cat >expect <<-EOF &&
error: could not write index
EOF
test_must_fail git stash create 2>err &&
test_cmp expect err
)
'
test_expect_success 'stash push reports a locked index' '
test_when_finished "rm -rf repo" &&
git init repo &&
(
cd repo &&
test_commit A A.file &&
echo change >A.file &&
touch .git/index.lock &&
cat >expect <<-EOF &&
error: could not write index
EOF
test_must_fail git stash push 2>err &&
test_cmp expect err
)
'
test_expect_success 'stash apply reports a locked index' '
test_when_finished "rm -rf repo" &&
git init repo &&
(
cd repo &&
test_commit A A.file &&
echo change >A.file &&
git stash push &&
touch .git/index.lock &&
cat >expect <<-EOF &&
error: could not write index
EOF
test_must_fail git stash apply 2>err &&
test_cmp expect err
)
'
test_done

View file

@ -24,6 +24,27 @@ pack_position () {
grep "$1" objects | cut -d" " -f1
}
# test_pack_objects_reused_all <pack-reused> <packs-reused>
test_pack_objects_reused_all () {
: >trace2.txt &&
GIT_TRACE2_EVENT="$PWD/trace2.txt" \
git pack-objects --stdout --revs --all --delta-base-offset \
>/dev/null &&
test_pack_reused "$1" <trace2.txt &&
test_packs_reused "$2" <trace2.txt
}
# test_pack_objects_reused <pack-reused> <packs-reused>
test_pack_objects_reused () {
: >trace2.txt &&
GIT_TRACE2_EVENT="$PWD/trace2.txt" \
git pack-objects --stdout --revs >/dev/null &&
test_pack_reused "$1" <trace2.txt &&
test_packs_reused "$2" <trace2.txt
}
test_expect_success 'preferred pack is reused for single-pack reuse' '
test_config pack.allowPackReuse single &&
@ -35,12 +56,24 @@ test_expect_success 'preferred pack is reused for single-pack reuse' '
git multi-pack-index write --bitmap &&
: >trace2.txt &&
GIT_TRACE2_EVENT="$PWD/trace2.txt" \
git pack-objects --stdout --revs --all >/dev/null &&
test_pack_objects_reused_all 3 1
'
test_pack_reused 3 <trace2.txt &&
test_packs_reused 1 <trace2.txt
test_expect_success 'multi-pack reuse is disabled by default' '
test_pack_objects_reused_all 3 1
'
test_expect_success 'feature.experimental implies multi-pack reuse' '
test_config feature.experimental true &&
test_pack_objects_reused_all 6 2
'
test_expect_success 'multi-pack reuse can be disabled with feature.experimental' '
test_config feature.experimental true &&
test_config pack.allowPackReuse single &&
test_pack_objects_reused_all 3 1
'
test_expect_success 'enable multi-pack reuse' '
@ -58,21 +91,11 @@ test_expect_success 'reuse all objects from subset of bitmapped packs' '
^$(git rev-parse A)
EOF
: >trace2.txt &&
GIT_TRACE2_EVENT="$PWD/trace2.txt" \
git pack-objects --stdout --revs <in >/dev/null &&
test_pack_reused 6 <trace2.txt &&
test_packs_reused 2 <trace2.txt
test_pack_objects_reused 6 2 <in
'
test_expect_success 'reuse all objects from all packs' '
: >trace2.txt &&
GIT_TRACE2_EVENT="$PWD/trace2.txt" \
git pack-objects --stdout --revs --all >/dev/null &&
test_pack_reused 9 <trace2.txt &&
test_packs_reused 3 <trace2.txt
test_pack_objects_reused_all 9 3
'
test_expect_success 'reuse objects from first pack with middle gap' '
@ -105,12 +128,7 @@ test_expect_success 'reuse objects from first pack with middle gap' '
^$(git rev-parse D)
EOF
: >trace2.txt &&
GIT_TRACE2_EVENT="$PWD/trace2.txt" \
git pack-objects --stdout --delta-base-offset --revs <in >/dev/null &&
test_pack_reused 3 <trace2.txt &&
test_packs_reused 1 <trace2.txt
test_pack_objects_reused 3 1 <in
'
test_expect_success 'reuse objects from middle pack with middle gap' '
@ -126,12 +144,7 @@ test_expect_success 'reuse objects from middle pack with middle gap' '
^$(git rev-parse D)
EOF
: >trace2.txt &&
GIT_TRACE2_EVENT="$PWD/trace2.txt" \
git pack-objects --stdout --delta-base-offset --revs <in >/dev/null &&
test_pack_reused 3 <trace2.txt &&
test_packs_reused 1 <trace2.txt
test_pack_objects_reused 3 1 <in
'
test_expect_success 'omit delta with uninteresting base (same pack)' '
@ -161,10 +174,6 @@ test_expect_success 'omit delta with uninteresting base (same pack)' '
^$base
EOF
: >trace2.txt &&
GIT_TRACE2_EVENT="$PWD/trace2.txt" \
git pack-objects --stdout --delta-base-offset --revs <in >/dev/null &&
# We can only reuse the 3 objects corresponding to "other" from
# the latest pack.
#
@ -176,8 +185,7 @@ test_expect_success 'omit delta with uninteresting base (same pack)' '
# The remaining objects from the other pack are similarly not
# reused because their objects are on the uninteresting side of
# the query.
test_pack_reused 3 <trace2.txt &&
test_packs_reused 1 <trace2.txt
test_pack_objects_reused 3 1 <in
'
test_expect_success 'omit delta from uninteresting base (cross pack)' '
@ -190,15 +198,10 @@ test_expect_success 'omit delta from uninteresting base (cross pack)' '
git multi-pack-index write --bitmap --preferred-pack="pack-$P.idx" &&
: >trace2.txt &&
GIT_TRACE2_EVENT="$PWD/trace2.txt" \
git pack-objects --stdout --delta-base-offset --all >/dev/null &&
packs_nr="$(find $packdir -type f -name "pack-*.pack" | wc -l)" &&
objects_nr="$(git rev-list --count --all --objects)" &&
test_pack_reused $(($objects_nr - 1)) <trace2.txt &&
test_packs_reused $packs_nr <trace2.txt
test_pack_objects_reused_all $(($objects_nr - 1)) $packs_nr
'
test_done

View file

@ -1356,6 +1356,52 @@ test_expect_success '--no-sort without subsequent --sort prints expected refs' '
test_cmp expected actual
'
test_expect_success 'set up custom date sorting' '
# Dates:
# - Wed Feb 07 2024 21:34:20 +0000
# - Tue Dec 14 1999 00:05:22 +0000
# - Fri Jun 04 2021 11:26:51 +0000
# - Mon Jan 22 2007 16:44:01 GMT+0000
i=1 &&
for when in 1707341660 945129922 1622806011 1169484241
do
GIT_COMMITTER_DATE="@$when +0000" \
GIT_COMMITTER_EMAIL="user@example.com" \
git tag -m "tag $when" custom-dates-$i &&
i=$(($i+1)) || return 1
done
'
test_expect_success 'sort by date defaults to full timestamp' '
cat >expected <<-\EOF &&
945129922 refs/tags/custom-dates-2
1169484241 refs/tags/custom-dates-4
1622806011 refs/tags/custom-dates-3
1707341660 refs/tags/custom-dates-1
EOF
git for-each-ref \
--format="%(creatordate:unix) %(refname)" \
--sort=creatordate \
"refs/tags/custom-dates-*" >actual &&
test_cmp expected actual
'
test_expect_success 'sort by custom date format' '
cat >expected <<-\EOF &&
00:05:22 refs/tags/custom-dates-2
11:26:51 refs/tags/custom-dates-3
16:44:01 refs/tags/custom-dates-4
21:34:20 refs/tags/custom-dates-1
EOF
git for-each-ref \
--format="%(creatordate:format:%H:%M:%S) %(refname)" \
--sort="creatordate:format:%H:%M:%S" \
"refs/tags/custom-dates-*" >actual &&
test_cmp expected actual
'
test_expect_success 'do not dereference NULL upon %(HEAD) on unborn branch' '
test_when_finished "git checkout main" &&
git for-each-ref --format="%(HEAD) %(refname:short)" refs/heads/ >actual &&

View file

@ -11,6 +11,11 @@ test_description='test bash completion'
# untraceable with such ancient Bash versions.
test_untraceable=UnfortunatelyYes
# Override environment and always use master for the default initial branch
# name for these tests, so that rev completion candidates are as expected.
GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME=master
export GIT_TEST_DEFAULT_INITIAL_BRANCH_NAME
. ./lib-bash.sh
complete ()
@ -1267,6 +1272,142 @@ test_expect_success 'git switch - with no options, complete local branches and u
EOF
'
test_expect_success 'git bisect - when not bisecting, complete only replay and start subcommands' '
test_completion "git bisect " <<-\EOF
replay Z
start Z
EOF
'
test_expect_success 'git bisect - complete options to start subcommand' '
test_completion "git bisect start --" <<-\EOF
--term-new Z
--term-bad Z
--term-old Z
--term-good Z
--no-checkout Z
--first-parent Z
EOF
'
test_expect_success 'setup for git-bisect tests requiring a repo' '
git init git-bisect &&
(
cd git-bisect &&
echo "initial contents" >file &&
git add file &&
git commit -am "Initial commit" &&
git tag initial &&
echo "new line" >>file &&
git commit -am "First change" &&
echo "another new line" >>file &&
git commit -am "Second change" &&
git tag final
)
'
test_expect_success 'git bisect - start subcommand arguments before double-dash are completed as revs' '
(
cd git-bisect &&
test_completion "git bisect start " <<-\EOF
HEAD Z
final Z
initial Z
master Z
EOF
)
'
# Note that these arguments are <pathspec>s, which in practice the fallback
# completion (not the git completion) later ends up completing as paths.
test_expect_success 'git bisect - start subcommand arguments after double-dash are not completed' '
(
cd git-bisect &&
test_completion "git bisect start final initial -- " ""
)
'
test_expect_success 'setup for git-bisect tests requiring ongoing bisection' '
(
cd git-bisect &&
git bisect start --term-new=custom_new --term-old=custom_old final initial
)
'
test_expect_success 'git-bisect - when bisecting all subcommands are candidates' '
(
cd git-bisect &&
test_completion "git bisect " <<-\EOF
start Z
bad Z
custom_new Z
custom_old Z
new Z
good Z
old Z
terms Z
skip Z
reset Z
visualize Z
replay Z
log Z
run Z
help Z
EOF
)
'
test_expect_success 'git-bisect - options to terms subcommand are candidates' '
(
cd git-bisect &&
test_completion "git bisect terms --" <<-\EOF
--term-bad Z
--term-good Z
--term-new Z
--term-old Z
EOF
)
'
test_expect_success 'git-bisect - git-log options to visualize subcommand are candidates' '
(
cd git-bisect &&
# The completion used for git-log and here does not complete
# every git-log option, so rather than hope to stay in sync
# with exactly what it does we will just spot-test here.
test_completion "git bisect visualize --sta" <<-\EOF &&
--stat Z
EOF
test_completion "git bisect visualize --summar" <<-\EOF
--summary Z
EOF
)
'
test_expect_success 'git-bisect - view subcommand is not a candidate' '
(
cd git-bisect &&
test_completion "git bisect vi" <<-\EOF
visualize Z
EOF
)
'
test_expect_success 'git-bisect - existing view subcommand is recognized and enables completion of git-log options' '
(
cd git-bisect &&
# The completion used for git-log and here does not complete
# every git-log option, so rather than hope to stay in sync
# with exactly what it does we will just spot-test here.
test_completion "git bisect view --sta" <<-\EOF &&
--stat Z
EOF
test_completion "git bisect view --summar" <<-\EOF
--summary Z
EOF
)
'
test_expect_success 'git checkout - completes refs and unique remote branches for DWIM' '
test_completion "git checkout " <<-\EOF
HEAD Z
@ -2583,6 +2724,35 @@ test_expect_success 'git config - variable name include' '
EOF
'
test_expect_success 'setup for git config submodule tests' '
test_create_repo sub &&
test_commit -C sub initial &&
git submodule add ./sub
'
test_expect_success 'git config - variable name - submodule and __git_compute_first_level_config_vars_for_section' '
test_completion "git config submodule." <<-\EOF
submodule.active Z
submodule.alternateErrorStrategy Z
submodule.alternateLocation Z
submodule.fetchJobs Z
submodule.propagateBranches Z
submodule.recurse Z
submodule.sub.Z
EOF
'
test_expect_success 'git config - variable name - __git_compute_second_level_config_vars_for_section' '
test_completion "git config submodule.sub." <<-\EOF
submodule.sub.url Z
submodule.sub.update Z
submodule.sub.branch Z
submodule.sub.fetchRecurseSubmodules Z
submodule.sub.ignore Z
submodule.sub.active Z
EOF
'
test_expect_success 'git config - value' '
test_completion "git config color.pager " <<-\EOF
false Z

View file

@ -21,12 +21,11 @@ static struct {
.result = RESULT_NONE,
};
#ifndef _MSC_VER
#define make_relative(location) location
#else
/*
* Visual C interpolates the absolute Windows path for `__FILE__`,
* but we want to see relative paths, as verified by t0080.
* There are other compilers that do the same, and are not for
* Windows.
*/
#include "dir.h"
@ -34,32 +33,66 @@ static const char *make_relative(const char *location)
{
static char prefix[] = __FILE__, buf[PATH_MAX], *p;
static size_t prefix_len;
static int need_bs_to_fs = -1;
if (!prefix_len) {
/* one-time preparation */
if (need_bs_to_fs < 0) {
size_t len = strlen(prefix);
const char *needle = "\\t\\unit-tests\\test-lib.c";
char needle[] = "t\\unit-tests\\test-lib.c";
size_t needle_len = strlen(needle);
if (len < needle_len || strcmp(needle, prefix + len - needle_len))
die("unexpected suffix of '%s'", prefix);
if (len < needle_len)
die("unexpected prefix '%s'", prefix);
/* let it end in a directory separator */
prefix_len = len - needle_len + 1;
/*
* The path could be relative (t/unit-tests/test-lib.c)
* or full (/home/user/git/t/unit-tests/test-lib.c).
* Check the slash between "t" and "unit-tests".
*/
prefix_len = len - needle_len;
if (prefix[prefix_len + 1] == '/') {
/* Oh, we're not Windows */
for (size_t i = 0; i < needle_len; i++)
if (needle[i] == '\\')
needle[i] = '/';
need_bs_to_fs = 0;
} else {
need_bs_to_fs = 1;
}
/*
* prefix_len == 0 if the compiler gives paths relative
* to the root of the working tree. Otherwise, we want
* to see that we did find the needle[] at a directory
* boundary. Again we rely on that needle[] begins with
* "t" followed by the directory separator.
*/
if (fspathcmp(needle, prefix + prefix_len) ||
(prefix_len && prefix[prefix_len - 1] != needle[1]))
die("unexpected suffix of '%s'", prefix);
}
/* Does it not start with the expected prefix? */
if (fspathncmp(location, prefix, prefix_len))
/*
* Does it not start with the expected prefix?
* Return it as-is without making it worse.
*/
if (prefix_len && fspathncmp(location, prefix, prefix_len))
return location;
strlcpy(buf, location + prefix_len, sizeof(buf));
/*
* If we do not need to munge directory separator, we can return
* the substring at the tail of the location.
*/
if (!need_bs_to_fs)
return location + prefix_len;
/* convert backslashes to forward slashes */
strlcpy(buf, location + prefix_len, sizeof(buf));
for (p = buf; *p; p++)
if (*p == '\\')
*p = '/';
return buf;
}
#endif
static void msg_with_prefix(const char *prefix, const char *format, va_list ap)
{

View file

@ -18,20 +18,20 @@
*/
void maybe_flush_or_die(FILE *f, const char *desc)
{
static int skip_stdout_flush = -1;
if (f == stdout) {
if (skip_stdout_flush < 0) {
skip_stdout_flush = git_env_bool("GIT_FLUSH", -1);
if (skip_stdout_flush < 0) {
static int force_flush_stdout = -1;
if (force_flush_stdout < 0) {
force_flush_stdout = git_env_bool("GIT_FLUSH", -1);
if (force_flush_stdout < 0) {
struct stat st;
if (fstat(fileno(stdout), &st))
skip_stdout_flush = 0;
force_flush_stdout = 1;
else
skip_stdout_flush = S_ISREG(st.st_mode);
force_flush_stdout = !S_ISREG(st.st_mode);
}
}
if (skip_stdout_flush && !ferror(f))
if (!force_flush_stdout && !ferror(f))
return;
}
if (fflush(f)) {
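
The rename from skip_stdout_flush to force_flush_stdout restores the
documented polarity of GIT_FLUSH: a true value forces a flush after every
write, a false value suppresses it, and when the variable is unset the
decision falls back to whether stdout is a regular file. A condensed
restatement of that decision, illustrative only:

/* -1 means GIT_FLUSH is unset; 0/1 is its parsed boolean value. */
static int should_force_flush(int git_flush_env, int stdout_is_regular_file)
{
	if (git_flush_env >= 0)
		return git_flush_env;		/* GIT_FLUSH=1 flush, GIT_FLUSH=0 skip */
	return !stdout_is_regular_file;		/* auto: flush unless a regular file */
}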