/*
 * This handles recursive filename detection with exclude
 * files, index knowledge etc..
 *
 * Copyright (C) Linus Torvalds, 2005-2006
 *               Junio Hamano, 2005-2006
 */
#include "git-compat-util.h"
#include "abspath.h"
#include "config.h"
#include "convert.h"
#include "dir.h"
#include "environment.h"
#include "gettext.h"
#include "name-hash.h"
#include "object-file.h"
#include "object-store-ll.h"
#include "path.h"
#include "refs.h"
#include "wildmatch.h"
#include "pathspec.h"
#include "utf8.h"
#include "varint.h"
#include "ewah/ewok.h"
#include "fsmonitor-ll.h"
#include "read-cache-ll.h"
#include "setup.h"
#include "sparse-index.h"
#include "submodule-config.h"
#include "symlinks.h"
#include "trace2.h"
#include "tree.h"

/*
 * Tells read_directory_recursive how a file or directory should be treated.
 * Values are ordered by significance, e.g. if a directory contains both
 * excluded and untracked files, it is listed as untracked because
 * path_untracked > path_excluded.
 */
enum path_treatment {
	path_none = 0,
	path_recurse,
	path_excluded,
	path_untracked
};
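
/*
 * For illustration (not from the original file): read_directory_recursive()
 * reports the maximum path_treatment it encountered, so a directory that
 * holds both an ignored object file (path_excluded) and a new source file
 * (path_untracked) ends up reported as untracked.
 */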

/*
 * Support data structure for our opendir/readdir/closedir wrappers
 */
struct cached_dir {
	DIR *fdir;
	struct untracked_cache_dir *untracked;
	int nr_files;
	int nr_dirs;

	const char *d_name;
	int d_type;
	const char *file;
	struct untracked_cache_dir *ucd;
};

static enum path_treatment read_directory_recursive(struct dir_struct *dir,
	struct index_state *istate, const char *path, int len,
	struct untracked_cache_dir *untracked,
	int check_only, int stop_at_first_file, const struct pathspec *pathspec);

static int resolve_dtype(int dtype, struct index_state *istate,
			 const char *path, int len);

struct dirent *readdir_skip_dot_and_dotdot(DIR *dirp)
{
	struct dirent *e;

	while ((e = readdir(dirp)) != NULL) {
		if (!is_dot_or_dotdot(e->d_name))
			break;
	}
	return e;
}
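
/*
 * For illustration, a caller typically drives this in a plain readdir
 * loop, roughly like:
 *
 *	while ((e = readdir_skip_dot_and_dotdot(dirp)) != NULL)
 *		... e->d_name is never "." or ".." here ...
 *
 * where "dirp" stands for any open DIR handle.
 */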

int count_slashes(const char *s)
{
	int cnt = 0;
	while (*s)
		if (*s++ == '/')
			cnt++;
	return cnt;
}

int fspathcmp(const char *a, const char *b)
{
	return ignore_case ? strcasecmp(a, b) : strcmp(a, b);
}

int fspatheq(const char *a, const char *b)
{
	return !fspathcmp(a, b);
}

int fspathncmp(const char *a, const char *b, size_t count)
{
	return ignore_case ? strncasecmp(a, b, count) : strncmp(a, b, count);
}

unsigned int fspathhash(const char *str)
{
	return ignore_case ? strihash(str) : strhash(str);
}
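
/*
 * Note (illustrative): the fspath*() helpers above all key off the global
 * "ignore_case" (core.ignorecase), so on a case-insensitive repository
 * fspathcmp("Makefile", "makefile") is 0 and fspathhash() yields the same
 * value for both spellings, while on a case-sensitive repository they
 * behave like plain strcmp()/strhash().
 */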

int git_fnmatch(const struct pathspec_item *item,
		const char *pattern, const char *string,
		int prefix)
{
	if (prefix > 0) {
		if (ps_strncmp(item, pattern, string, prefix))
			return WM_NOMATCH;
		pattern += prefix;
		string += prefix;
	}
	if (item->flags & PATHSPEC_ONESTAR) {
		int pattern_len = strlen(++pattern);
		int string_len = strlen(string);
		return string_len < pattern_len ||
			ps_strcmp(item, pattern,
				  string + string_len - pattern_len);
	}
	if (item->magic & PATHSPEC_GLOB)
		return wildmatch(pattern, string,
				 WM_PATHNAME |
				 (item->magic & PATHSPEC_ICASE ? WM_CASEFOLD : 0));
	else
		/* wildmatch has not learned no FNM_PATHNAME mode yet */
		return wildmatch(pattern, string,
				 item->magic & PATHSPEC_ICASE ? WM_CASEFOLD : 0);
}
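
/*
 * For example (sketch): a pathspec item whose pattern is just "*.c" is
 * flagged PATHSPEC_ONESTAR, so matching "dir/name.c" above reduces to a
 * cheap tail comparison against ".c" instead of a full wildmatch() run.
 */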

static int fnmatch_icase_mem(const char *pattern, int patternlen,
			     const char *string, int stringlen,
			     int flags)
{
	int match_status;
	struct strbuf pat_buf = STRBUF_INIT;
	struct strbuf str_buf = STRBUF_INIT;
	const char *use_pat = pattern;
	const char *use_str = string;

	if (pattern[patternlen]) {
		strbuf_add(&pat_buf, pattern, patternlen);
		use_pat = pat_buf.buf;
	}
	if (string[stringlen]) {
		strbuf_add(&str_buf, string, stringlen);
		use_str = str_buf.buf;
	}

	if (ignore_case)
		flags |= WM_CASEFOLD;
	match_status = wildmatch(use_pat, use_str, flags);

	strbuf_release(&pat_buf);
	strbuf_release(&str_buf);

	return match_status;
}
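
/*
 * The strbuf copies above only happen when the counted <pattern> or
 * <string> slice is not already NUL-terminated at its stated length;
 * callers such as match_basename() usually pass whole strings, so the
 * common case allocates nothing.
 */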

static size_t common_prefix_len(const struct pathspec *pathspec)
{
	int n;
	size_t max = 0;

	/*
	 * ":(icase)path" is treated as a pathspec full of
	 * wildcard. In other words, only prefix is considered common
	 * prefix. If the pathspec is abc/foo abc/bar, running in
	 * subdir xyz, the common prefix is still xyz, not xyz/abc as
	 * in non-:(icase).
	 */
	GUARD_PATHSPEC(pathspec,
		       PATHSPEC_FROMTOP |
		       PATHSPEC_MAXDEPTH |
		       PATHSPEC_LITERAL |
		       PATHSPEC_GLOB |
		       PATHSPEC_ICASE |
		       PATHSPEC_EXCLUDE |
		       PATHSPEC_ATTR);

	for (n = 0; n < pathspec->nr; n++) {
		size_t i = 0, len = 0, item_len;
		if (pathspec->items[n].magic & PATHSPEC_EXCLUDE)
			continue;
		if (pathspec->items[n].magic & PATHSPEC_ICASE)
			item_len = pathspec->items[n].prefix;
		else
			item_len = pathspec->items[n].nowildcard_len;
		while (i < item_len && (n == 0 || i < max)) {
			char c = pathspec->items[n].match[i];
			if (c != pathspec->items[0].match[i])
				break;
			if (c == '/')
				len = i + 1;
			i++;
		}
		if (n == 0 || len < max) {
			max = len;
			if (!max)
				break;
		}
	}
	return max;
}

/*
 * Returns a copy of the longest leading path common among all
 * pathspecs.
 */
char *common_prefix(const struct pathspec *pathspec)
{
	unsigned long len = common_prefix_len(pathspec);

	return len ? xmemdupz(pathspec->items[0].match, len) : NULL;
}
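
/*
 * For example, with the two pathspec elements "abc/foo" and "abc/bar"
 * the loop above stops at the '/' they share, so common_prefix()
 * returns "abc/"; a lone "abc" (no slash before the first difference
 * or wildcard) yields NULL.
 */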

int fill_directory(struct dir_struct *dir,
		   struct index_state *istate,
		   const struct pathspec *pathspec)
{
	const char *prefix;
	size_t prefix_len;

	unsigned exclusive_flags = DIR_SHOW_IGNORED | DIR_SHOW_IGNORED_TOO;
	if ((dir->flags & exclusive_flags) == exclusive_flags)
		BUG("DIR_SHOW_IGNORED and DIR_SHOW_IGNORED_TOO are exclusive");

	/*
	 * Calculate common prefix for the pathspec, and
	 * use that to optimize the directory walk
	 */
	prefix_len = common_prefix_len(pathspec);
	prefix = prefix_len ? pathspec->items[0].match : "";

	/* Read the directory and prune it */
	read_directory(dir, istate, prefix, prefix_len, pathspec);

	return prefix_len;
}
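
/*
 * Typical use is roughly (a sketch, not a prescription):
 *
 *	struct dir_struct dir = DIR_INIT;
 *	setup_standard_excludes(&dir);
 *	fill_directory(&dir, istate, &pathspec);
 *	for (i = 0; i < dir.nr; i++)
 *		... dir.entries[i]->name is an untracked path ...
 *	dir_clear(&dir);
 */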

int within_depth(const char *name, int namelen,
		 int depth, int max_depth)
{
	const char *cp = name, *cpe = name + namelen;

	while (cp < cpe) {
		if (*cp++ != '/')
			continue;
		depth++;
		if (depth > max_depth)
			return 0;
	}
	return 1;
}
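
/*
 * For example, within_depth("a/b/c", 5, 0, 1) returns 0 (the name is
 * nested two levels deep, which exceeds max_depth 1), while
 * within_depth("a/b/c", 5, 0, 2) returns 1.
 */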

/*
 * Read the contents of the blob with the given OID into a buffer.
 * Append a trailing LF to the end if the last line doesn't have one.
 *
 * Returns:
 *    -1 when the OID is invalid or unknown or does not refer to a blob.
 *     0 when the blob is empty.
 *     1 along with { data, size } of the (possibly augmented) buffer
 *       when successful.
 *
 * Optionally updates the given oid_stat with the given OID (when valid).
 */
static int do_read_blob(const struct object_id *oid, struct oid_stat *oid_stat,
			size_t *size_out, char **data_out)
{
	enum object_type type;
	unsigned long sz;
	char *data;

	*size_out = 0;
	*data_out = NULL;

	data = repo_read_object_file(the_repository, oid, &type, &sz);
	if (!data || type != OBJ_BLOB) {
		free(data);
		return -1;
	}

	if (oid_stat) {
		memset(&oid_stat->stat, 0, sizeof(oid_stat->stat));
		oidcpy(&oid_stat->oid, oid);
	}

	if (sz == 0) {
		free(data);
		return 0;
	}

	if (data[sz - 1] != '\n') {
		data = xrealloc(data, st_add(sz, 1));
		data[sz++] = '\n';
	}

	*size_out = xsize_t(sz);
	*data_out = data;

	return 1;
}
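
/*
 * A caller typically handles the tri-state result roughly like:
 *
 *	size_t size;
 *	char *data;
 *	if (do_read_blob(oid, NULL, &size, &data) < 1)
 *		... nothing to parse (missing, non-blob, or empty) ...
 *	else
 *		... parse the LF-terminated buffer, then free(data) ...
 */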

#define DO_MATCH_EXCLUDE   (1<<0)
#define DO_MATCH_DIRECTORY (1<<1)
#define DO_MATCH_LEADING_PATHSPEC (1<<2)

/*
 * Does the given pathspec match the given name?  A match is found if
 *
 * (1) the pathspec string is leading directory of 'name' ("RECURSIVELY"), or
 * (2) the pathspec string has a leading part matching 'name' ("LEADING"), or
 * (3) the pathspec string is a wildcard and matches 'name' ("WILDCARD"), or
 * (4) the pathspec string is exactly the same as 'name' ("EXACT").
 *
 * Return value tells which case it was (1-4), or 0 when there is no match.
 *
 * It may be instructive to look at a small table of concrete examples
 * to understand the differences between 1, 2, and 4:
 *
 *              Pathspecs
 *                |    a/b    |   a/b/    | a/b/c
 *          ------+-----------+-----------+-------------
 *          a/b   |  EXACT    |  EXACT[1] | LEADING[2]
 *  Names   a/b/  | RECURSIVE |   EXACT   | LEADING[2]
 *          a/b/c | RECURSIVE | RECURSIVE |  EXACT
 *
 * [1] Only if DO_MATCH_DIRECTORY is passed; otherwise, this is NOT a match.
 * [2] Only if DO_MATCH_LEADING_PATHSPEC is passed; otherwise, not a match.
 */
static int match_pathspec_item(struct index_state *istate,
			       const struct pathspec_item *item, int prefix,
			       const char *name, int namelen, unsigned flags)
{
	/* name/namelen has prefix cut off by caller */
	const char *match = item->match + prefix;
	int matchlen = item->len - prefix;

	/*
	 * The normal call pattern is:
	 * 1. prefix = common_prefix_len(ps);
	 * 2. prune something, or fill_directory
	 * 3. match_pathspec()
	 *
	 * 'prefix' at #1 may be shorter than the command's prefix and
	 * it's ok for #2 to match extra files. Those extras will be
	 * trimmed at #3.
	 *
	 * Suppose the pathspec is 'foo' and '../bar' running from
	 * subdir 'xyz'. The common prefix at #1 will be empty, thanks
	 * to "../". We may have xyz/foo _and_ XYZ/foo after #2. The
	 * user does not want XYZ/foo, only the "foo" part should be
	 * case-insensitive. We need to filter out XYZ/foo here. In
	 * other words, we do not trust the caller on comparing the
	 * prefix part when :(icase) is involved. We do exact
	 * comparison ourselves.
	 *
	 * Normally the caller (common_prefix_len() in fact) does
	 * _exact_ matching on name[-prefix+1..-1] and we do not need
	 * to check that part. Be defensive and check it anyway, in
	 * case common_prefix_len is changed, or a new caller is
	 * introduced that does not use common_prefix_len.
	 *
	 * If the penalty turns out too high when prefix is really
	 * long, maybe change it to
	 *   strncmp(match, name, item->prefix - prefix)
	 */
	if (item->prefix && (item->magic & PATHSPEC_ICASE) &&
	    strncmp(item->match, name - prefix, item->prefix))
		return 0;

	if (item->attr_match_nr &&
	    !match_pathspec_attrs(istate, name - prefix, namelen + prefix, item))
		return 0;

	/* If the match was just the prefix, we matched */
	if (!*match)
		return MATCHED_RECURSIVELY;

	if (matchlen <= namelen && !ps_strncmp(item, match, name, matchlen)) {
		if (matchlen == namelen)
			return MATCHED_EXACTLY;

		if (match[matchlen-1] == '/' || name[matchlen] == '/')
			return MATCHED_RECURSIVELY;
	} else if ((flags & DO_MATCH_DIRECTORY) &&
		   match[matchlen - 1] == '/' &&
		   namelen == matchlen - 1 &&
		   !ps_strncmp(item, match, name, namelen))
		return MATCHED_EXACTLY;

	if (item->nowildcard_len < item->len &&
	    !git_fnmatch(item, match, name,
			 item->nowildcard_len - prefix))
		return MATCHED_FNMATCH;

	/* Perform checks to see if "name" is a leading string of the pathspec */
	if ( (flags & DO_MATCH_LEADING_PATHSPEC) &&
	    !(flags & DO_MATCH_EXCLUDE)) {
		/* name is a literal prefix of the pathspec */
		int offset = name[namelen-1] == '/' ? 1 : 0;
		if ((namelen < matchlen) &&
		    (match[namelen-offset] == '/') &&
		    !ps_strncmp(item, match, name, namelen))
			return MATCHED_RECURSIVELY_LEADING_PATHSPEC;

		/* name doesn't match up to the first wild character */
		if (item->nowildcard_len < item->len &&
		    ps_strncmp(item, match, name,
			       item->nowildcard_len - prefix))
			return 0;

		/*
		 * name has no wildcard, and it didn't match as a leading
		 * pathspec so return.
		 */
		if (item->nowildcard_len == item->len)
			return 0;

		/*
		 * Here is where we would perform a wildmatch to check if
		 * "name" can be matched as a directory (or a prefix) against
		 * the pathspec.  Since wildmatch doesn't have this capability
		 * at the present we have to punt and say that it is a match,
		 * potentially returning a false positive.
		 * The submodules themselves will be able to perform more
		 * accurate matching to determine if the pathspec matches.
		 */
		return MATCHED_RECURSIVELY_LEADING_PATHSPEC;
	}

	return 0;
}

/*
 * do_match_pathspec() is meant to ONLY be called by
 * match_pathspec_with_flags(); calling it directly risks pathspecs
 * like ':!unwanted_path' being ignored.
 *
 * Given a name and a list of pathspecs, returns the nature of the
 * closest (i.e. most specific) match of the name to any of the
 * pathspecs.
 *
 * The caller typically calls this multiple times with the same
 * pathspec and seen[] array but with different name/namelen
 * (e.g. entries from the index) and is interested in seeing if and
 * how each pathspec matches all the names it calls this function
 * with.  A mark is left in the seen[] array for each pathspec element
 * indicating the closest type of match that element achieved, so if
 * seen[n] remains zero after multiple invocations, that means the nth
 * pathspec did not match any names, which could indicate that the
 * user mistyped the nth pathspec.
 */
static int do_match_pathspec(struct index_state *istate,
			     const struct pathspec *ps,
			     const char *name, int namelen,
			     int prefix, char *seen,
			     unsigned flags)
{
	int i, retval = 0, exclude = flags & DO_MATCH_EXCLUDE;

	GUARD_PATHSPEC(ps,
		       PATHSPEC_FROMTOP |
		       PATHSPEC_MAXDEPTH |
		       PATHSPEC_LITERAL |
		       PATHSPEC_GLOB |
		       PATHSPEC_ICASE |
		       PATHSPEC_EXCLUDE |
		       PATHSPEC_ATTR);

	if (!ps->nr) {
		if (!ps->recursive ||
		    !(ps->magic & PATHSPEC_MAXDEPTH) ||
		    ps->max_depth == -1)
			return MATCHED_RECURSIVELY;

		if (within_depth(name, namelen, 0, ps->max_depth))
			return MATCHED_EXACTLY;
		else
			return 0;
	}

	name += prefix;
	namelen -= prefix;

	for (i = ps->nr - 1; i >= 0; i--) {
		int how;

		if ((!exclude &&   ps->items[i].magic & PATHSPEC_EXCLUDE) ||
		    ( exclude && !(ps->items[i].magic & PATHSPEC_EXCLUDE)))
			continue;

		if (seen && seen[i] == MATCHED_EXACTLY)
			continue;
		/*
		 * Make exclude patterns optional and never report
		 * "pathspec ':(exclude)foo' matches no files"
		 */
		if (seen && ps->items[i].magic & PATHSPEC_EXCLUDE)
			seen[i] = MATCHED_FNMATCH;
		how = match_pathspec_item(istate, ps->items+i, prefix, name,
					  namelen, flags);
		if (ps->recursive &&
		    (ps->magic & PATHSPEC_MAXDEPTH) &&
		    ps->max_depth != -1 &&
		    how && how != MATCHED_FNMATCH) {
			int len = ps->items[i].len;
			if (name[len] == '/')
				len++;
			if (within_depth(name+len, namelen-len, 0, ps->max_depth))
				how = MATCHED_EXACTLY;
			else
				how = 0;
		}
		if (how) {
			if (retval < how)
				retval = how;
			if (seen && seen[i] < how)
				seen[i] = how;
		}
	}
	return retval;
}

static int match_pathspec_with_flags(struct index_state *istate,
				     const struct pathspec *ps,
				     const char *name, int namelen,
				     int prefix, char *seen, unsigned flags)
{
	int positive, negative;
	positive = do_match_pathspec(istate, ps, name, namelen,
				     prefix, seen, flags);
	if (!(ps->magic & PATHSPEC_EXCLUDE) || !positive)
		return positive;
	negative = do_match_pathspec(istate, ps, name, namelen,
				     prefix, seen,
				     flags | DO_MATCH_EXCLUDE);
	return negative ? 0 : positive;
}

int match_pathspec(struct index_state *istate,
		   const struct pathspec *ps,
		   const char *name, int namelen,
		   int prefix, char *seen, int is_dir)
{
	unsigned flags = is_dir ? DO_MATCH_DIRECTORY : 0;
	return match_pathspec_with_flags(istate, ps, name, namelen,
					 prefix, seen, flags);
}
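
/*
 * A typical caller walks many names against one pathspec and one seen[]
 * array, roughly:
 *
 *	char *seen = xcalloc(pathspec.nr, 1);
 *	for (i = 0; i < istate->cache_nr; i++) {
 *		const struct cache_entry *ce = istate->cache[i];
 *		if (match_pathspec(istate, &pathspec, ce->name,
 *				   ce_namelen(ce), 0, seen, 0))
 *			... ce is of interest ...
 *	}
 *	... a seen[n] still zero afterwards means pathspec element n
 *	    matched nothing, e.g. a possible typo worth reporting ...
 */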

/**
 * Check if a submodule is a superset of the pathspec
 */
int submodule_path_match(struct index_state *istate,
			 const struct pathspec *ps,
			 const char *submodule_name,
			 char *seen)
{
|
2020-06-05 18:23:48 +00:00
|
|
|
int matched = match_pathspec_with_flags(istate, ps, submodule_name,
|
|
|
|
strlen(submodule_name),
|
|
|
|
0, seen,
|
|
|
|
DO_MATCH_DIRECTORY |
|
|
|
|
DO_MATCH_LEADING_PATHSPEC);
|
2016-10-07 18:18:51 +00:00
|
|
|
return matched;
|
|
|
|
}
|
|
|
|
|
2015-03-24 21:12:10 +00:00
|
|
|
int report_path_error(const char *ps_matched,
|
2019-03-20 08:15:48 +00:00
|
|
|
const struct pathspec *pathspec)
|
2015-03-24 21:12:10 +00:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Make sure all pathspec matched; otherwise it is an error.
|
|
|
|
*/
|
|
|
|
int num, errors = 0;
|
|
|
|
for (num = 0; num < pathspec->nr; num++) {
|
|
|
|
int other, found_dup;
|
|
|
|
|
|
|
|
if (ps_matched[num])
|
|
|
|
continue;
|
|
|
|
/*
|
|
|
|
* The caller might have fed identical pathspec
|
|
|
|
* twice. Do not barf on such a mistake.
|
|
|
|
* FIXME: parse_pathspec should have eliminated
|
|
|
|
* duplicate pathspec.
|
|
|
|
*/
|
|
|
|
for (found_dup = other = 0;
|
|
|
|
!found_dup && other < pathspec->nr;
|
|
|
|
other++) {
|
|
|
|
if (other == num || !ps_matched[other])
|
|
|
|
continue;
|
|
|
|
if (!strcmp(pathspec->items[other].original,
|
|
|
|
pathspec->items[num].original))
|
|
|
|
/*
|
|
|
|
* Ok, we have a match already.
|
|
|
|
*/
|
|
|
|
found_dup = 1;
|
|
|
|
}
|
|
|
|
if (found_dup)
|
|
|
|
continue;
|
|
|
|
|
2018-07-21 07:49:30 +00:00
|
|
|
error(_("pathspec '%s' did not match any file(s) known to git"),
|
2015-03-24 21:12:10 +00:00
|
|
|
pathspec->items[num].original);
|
|
|
|
errors++;
|
|
|
|
}
|
|
|
|
return errors;
|
|
|
|
}
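A hedged usage sketch (hypothetical helper, not part of dir.c): callers typically thread a per-pathspec "seen" array through their match_pathspec() calls and then hand it to report_path_error() so that pathspecs that never matched anything are reported.
static int check_pathspec_matches(struct index_state *istate,
				  const struct pathspec *ps,
				  const char **paths, int nr)
{
	char *seen = xcalloc(ps->nr, 1);	/* one byte per pathspec item */
	int i, errors;

	for (i = 0; i < nr; i++)
		match_pathspec(istate, ps, paths[i], strlen(paths[i]),
			       0, seen, 0);

	errors = report_path_error(seen, ps);	/* 0 when every item matched */
	free(seen);
	return errors;
}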
|
|
|
|
|
2012-06-07 07:53:35 +00:00
|
|
|
/*
|
|
|
|
* Return the length of the "simple" part of a path match limiter.
|
|
|
|
*/
|
2013-07-14 08:35:28 +00:00
|
|
|
int simple_length(const char *match)
|
2012-06-07 07:53:35 +00:00
|
|
|
{
|
|
|
|
int len = -1;
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
unsigned char c = *match++;
|
|
|
|
len++;
|
|
|
|
if (c == '\0' || is_glob_special(c))
|
|
|
|
return len;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-14 08:35:28 +00:00
|
|
|
int no_wildcard(const char *string)
|
2007-10-28 20:27:13 +00:00
|
|
|
{
|
2012-06-07 07:53:35 +00:00
|
|
|
return string[simple_length(string)] == '\0';
|
2007-10-28 20:27:13 +00:00
|
|
|
}
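/*
 * Illustrative expectations (assuming is_glob_special() flags the usual
 * glob metacharacters such as '*', '?', '[' and '\\'):
 *
 *   simple_length("Makefile") == 8
 *   simple_length("src/*.c")  == 4   (stops before the '*')
 *   no_wildcard("Makefile")   == 1
 *   no_wildcard("*.o")        == 0
 */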
|
|
|
|
|
2019-09-03 18:04:57 +00:00
|
|
|
void parse_path_pattern(const char **pattern,
|
2012-10-15 06:24:39 +00:00
|
|
|
int *patternlen,
|
2016-03-01 17:02:59 +00:00
|
|
|
unsigned *flags,
|
2012-10-15 06:24:39 +00:00
|
|
|
int *nowildcardlen)
|
2012-10-15 06:24:38 +00:00
|
|
|
{
|
|
|
|
const char *p = *pattern;
|
|
|
|
size_t i, len;
|
|
|
|
|
|
|
|
*flags = 0;
|
|
|
|
if (*p == '!') {
|
2019-09-03 18:04:56 +00:00
|
|
|
*flags |= PATTERN_FLAG_NEGATIVE;
|
2012-10-15 06:24:38 +00:00
|
|
|
p++;
|
|
|
|
}
|
|
|
|
len = strlen(p);
|
|
|
|
if (len && p[len - 1] == '/') {
|
|
|
|
len--;
|
2019-09-03 18:04:56 +00:00
|
|
|
*flags |= PATTERN_FLAG_MUSTBEDIR;
|
2012-10-15 06:24:38 +00:00
|
|
|
}
|
|
|
|
for (i = 0; i < len; i++) {
|
|
|
|
if (p[i] == '/')
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (i == len)
|
2019-09-03 18:04:56 +00:00
|
|
|
*flags |= PATTERN_FLAG_NODIR;
|
2012-10-15 06:24:38 +00:00
|
|
|
*nowildcardlen = simple_length(p);
|
|
|
|
/*
|
|
|
|
* we should have excluded the trailing slash from 'p' too,
|
|
|
|
* but that's one more allocation. Instead just make sure
|
|
|
|
* nowildcardlen does not exceed real patternlen
|
|
|
|
*/
|
|
|
|
if (*nowildcardlen > len)
|
|
|
|
*nowildcardlen = len;
|
|
|
|
if (*p == '*' && no_wildcard(p + 1))
|
2019-09-03 18:04:56 +00:00
|
|
|
*flags |= PATTERN_FLAG_ENDSWITH;
|
2012-10-15 06:24:38 +00:00
|
|
|
*pattern = p;
|
|
|
|
*patternlen = len;
|
|
|
|
}
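/*
 * Worked example (hypothetical input) for the parsing above: the pattern
 * string "!foo/" yields
 *   *flags         = PATTERN_FLAG_NEGATIVE | PATTERN_FLAG_MUSTBEDIR |
 *                    PATTERN_FLAG_NODIR
 *   *pattern       pointing past the '!' (at "foo/")
 *   *patternlen    = 3   (the trailing '/' is not counted)
 *   *nowildcardlen = 3   (clamped to *patternlen)
 */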
|
|
|
|
|
2022-08-25 17:09:48 +00:00
|
|
|
int pl_hashmap_cmp(const void *cmp_data UNUSED,
|
2019-11-21 22:04:42 +00:00
|
|
|
const struct hashmap_entry *a,
|
|
|
|
const struct hashmap_entry *b,
|
2022-08-25 17:09:48 +00:00
|
|
|
const void *key UNUSED)
|
2019-11-21 22:04:41 +00:00
|
|
|
{
|
|
|
|
const struct pattern_entry *ee1 =
|
|
|
|
container_of(a, struct pattern_entry, ent);
|
|
|
|
const struct pattern_entry *ee2 =
|
|
|
|
container_of(b, struct pattern_entry, ent);
|
|
|
|
|
|
|
|
size_t min_len = ee1->patternlen <= ee2->patternlen
|
|
|
|
? ee1->patternlen
|
|
|
|
: ee2->patternlen;
|
|
|
|
|
2022-10-08 11:05:43 +00:00
|
|
|
return fspathncmp(ee1->pattern, ee2->pattern, min_len);
|
2019-11-21 22:04:41 +00:00
|
|
|
}
|
|
|
|
|
2020-01-31 20:16:09 +00:00
|
|
|
static char *dup_and_filter_pattern(const char *pattern)
|
|
|
|
{
|
|
|
|
char *set, *read;
|
|
|
|
size_t count = 0;
|
|
|
|
char *result = xstrdup(pattern);
|
|
|
|
|
|
|
|
set = result;
|
|
|
|
read = result;
|
|
|
|
|
|
|
|
while (*read) {
|
|
|
|
/* skip escape characters (once) */
|
|
|
|
if (*read == '\\')
|
|
|
|
read++;
|
|
|
|
|
|
|
|
*set = *read;
|
|
|
|
|
|
|
|
set++;
|
|
|
|
read++;
|
|
|
|
count++;
|
|
|
|
}
|
|
|
|
*set = 0;
|
|
|
|
|
|
|
|
if (count > 2 &&
|
|
|
|
*(set - 1) == '*' &&
|
|
|
|
*(set - 2) == '/')
|
|
|
|
*(set - 2) = 0;
|
|
|
|
|
|
|
|
return result;
|
|
|
|
}
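/*
 * Illustrative behaviour (hedged reading of the code above):
 *   "/A/B/*"  becomes "/A/B"    (trailing slash-asterisk stripped)
 *   "/A/\*B/" becomes "/A/*B/"  (escaping backslash dropped, tail kept)
 */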
|
|
|
|
|
2019-11-21 22:04:41 +00:00
|
|
|
static void add_pattern_to_hashsets(struct pattern_list *pl, struct path_pattern *given)
|
|
|
|
{
|
|
|
|
struct pattern_entry *translated;
|
|
|
|
char *truncated;
|
|
|
|
char *data = NULL;
|
2020-01-31 20:16:08 +00:00
|
|
|
const char *prev, *cur, *next;
|
2019-11-21 22:04:41 +00:00
|
|
|
|
|
|
|
if (!pl->use_cone_patterns)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (given->flags & PATTERN_FLAG_NEGATIVE &&
|
|
|
|
given->flags & PATTERN_FLAG_MUSTBEDIR &&
|
|
|
|
!strcmp(given->pattern, "/*")) {
|
|
|
|
pl->full_cone = 0;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!given->flags && !strcmp(given->pattern, "/*")) {
|
|
|
|
pl->full_cone = 1;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
sparse-checkout: allow one-character directories in cone mode
In 9e6d3e64 (sparse-checkout: detect short patterns, 2020-01-24), a
condition on the minimum length of a cone-mode pattern was introduced.
However, this condition was off-by-one.
If we have a directory with a single character, say "b", then the
command
git sparse-checkout set b
will correctly add the pattern "/b/" to the sparse-checkout file. When
this is interpreted in dir.c, the pattern is "/b" with the
PATTERN_FLAG_MUSTBEDIR flag. This string has length two, which satisfies
our inclusive inequality (<= 2).
The reason for this inequality is that we will start to read the pattern
string character-by-character using three char pointers: prev, cur,
next. In particular, next is set to the current pattern plus two. The
mistake was that next will still be a valid pointer when the pattern
length is two, since the string is null-terminated.
Make this inequality strict so these patterns work.
Signed-off-by: Derrick Stolee <dstolee@microsoft.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-02-20 20:07:06 +00:00
|
|
|
if (given->patternlen < 2 ||
|
2021-12-16 16:13:42 +00:00
|
|
|
*given->pattern != '/' ||
|
2020-01-24 21:19:37 +00:00
|
|
|
strstr(given->pattern, "**")) {
|
2020-01-24 21:19:36 +00:00
|
|
|
/* Not a cone pattern. */
|
|
|
|
warning(_("unrecognized pattern: '%s'"), given->pattern);
|
|
|
|
goto clear_hashmaps;
|
|
|
|
}
|
|
|
|
|
dir: check for single file cone patterns
The sparse checkout documentation states that the cone mode pattern set
is limited to patterns that either recursively include directories or
patterns that match all files in a directory. In the sparse checkout
file, the former manifest in the form:
/A/B/C/
while the latter become a pair of patterns either in the form:
/A/B/
!/A/B/*/
or in the special case of matching the toplevel files:
/*
!/*/
The 'add_pattern_to_hashsets()' function contains checks which serve to
disable cone-mode when non-cone patterns are encountered. However, these
do not catch when the pattern list attempts to match a single file or
directory, e.g. a pattern in the form:
/A/B/C
This causes sparse-checkout to exhibit unexpected behaviour when such a
pattern is in the sparse-checkout file and cone mode is enabled.
Concretely, with the pattern like the above, sparse-checkout, in
non-cone mode, will only include the directory or file located at
'/A/B/C'. However, with cone mode enabled, sparse-checkout will instead
just manifest the toplevel files but not any file located at '/A/B/C'.
Relatedly, issues occur when supplying the same kind of filter when
partial cloning with '--filter=sparse:oid=<oid>'. 'upload-pack' will
correctly just include the objects that match the non-cone pattern
matching. Which means that checking out the newly cloned repo with the
same filter, but with cone mode enabled, fails due to missing objects.
To fix these issues, add a cone mode pattern check that asserts that
every pattern is either a directory match or the pattern '/*'. Add a
test to verify the new pattern check and modify another to reflect that
non-directory patterns are caught earlier.
Signed-off-by: William Sprent <williams@unity3d.com>
Acked-by: Victoria Dye <vdye@github.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2023-01-03 08:20:59 +00:00
|
|
|
if (!(given->flags & PATTERN_FLAG_MUSTBEDIR) &&
|
|
|
|
strcmp(given->pattern, "/*")) {
|
|
|
|
/* Not a cone pattern. */
|
|
|
|
warning(_("unrecognized pattern: '%s'"), given->pattern);
|
|
|
|
goto clear_hashmaps;
|
|
|
|
}
|
|
|
|
|
2020-01-31 20:16:08 +00:00
|
|
|
prev = given->pattern;
|
|
|
|
cur = given->pattern + 1;
|
|
|
|
next = given->pattern + 2;
|
|
|
|
|
|
|
|
while (*cur) {
|
|
|
|
/* Watch for glob characters '*', '\', '[', '?' */
|
|
|
|
if (!is_glob_special(*cur))
|
|
|
|
goto increment;
|
|
|
|
|
|
|
|
/* But only if *prev != '\\' */
|
|
|
|
if (*prev == '\\')
|
|
|
|
goto increment;
|
|
|
|
|
|
|
|
/* But allow the initial '\' */
|
|
|
|
if (*cur == '\\' &&
|
|
|
|
is_glob_special(*next))
|
|
|
|
goto increment;
|
|
|
|
|
|
|
|
/* But a trailing '/' then '*' is fine */
|
|
|
|
if (*prev == '/' &&
|
|
|
|
*cur == '*' &&
|
|
|
|
*next == 0)
|
|
|
|
goto increment;
|
|
|
|
|
|
|
|
/* Not a cone pattern. */
|
|
|
|
warning(_("unrecognized pattern: '%s'"), given->pattern);
|
|
|
|
goto clear_hashmaps;
|
|
|
|
|
|
|
|
increment:
|
|
|
|
prev++;
|
|
|
|
cur++;
|
|
|
|
next++;
|
|
|
|
}
|
|
|
|
|
2019-11-21 22:04:41 +00:00
|
|
|
if (given->patternlen > 2 &&
|
|
|
|
!strcmp(given->pattern + given->patternlen - 2, "/*")) {
|
|
|
|
if (!(given->flags & PATTERN_FLAG_NEGATIVE)) {
|
|
|
|
/* Not a cone pattern. */
|
|
|
|
warning(_("unrecognized pattern: '%s'"), given->pattern);
|
|
|
|
goto clear_hashmaps;
|
|
|
|
}
|
|
|
|
|
2020-01-31 20:16:09 +00:00
|
|
|
truncated = dup_and_filter_pattern(given->pattern);
|
2019-11-21 22:04:41 +00:00
|
|
|
|
|
|
|
translated = xmalloc(sizeof(struct pattern_entry));
|
|
|
|
translated->pattern = truncated;
|
|
|
|
translated->patternlen = given->patternlen - 2;
|
|
|
|
hashmap_entry_init(&translated->ent,
|
2021-07-30 19:06:58 +00:00
|
|
|
fspathhash(translated->pattern));
|
2019-11-21 22:04:41 +00:00
|
|
|
|
|
|
|
if (!hashmap_get_entry(&pl->recursive_hashmap,
|
|
|
|
translated, ent, NULL)) {
|
|
|
|
/* We did not see the "parent" included */
|
|
|
|
warning(_("unrecognized negative pattern: '%s'"),
|
|
|
|
given->pattern);
|
|
|
|
free(truncated);
|
|
|
|
free(translated);
|
|
|
|
goto clear_hashmaps;
|
|
|
|
}
|
|
|
|
|
|
|
|
hashmap_add(&pl->parent_hashmap, &translated->ent);
|
|
|
|
hashmap_remove(&pl->recursive_hashmap, &translated->ent, &data);
|
|
|
|
free(data);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (given->flags & PATTERN_FLAG_NEGATIVE) {
|
|
|
|
warning(_("unrecognized negative pattern: '%s'"),
|
|
|
|
given->pattern);
|
|
|
|
goto clear_hashmaps;
|
|
|
|
}
|
|
|
|
|
|
|
|
translated = xmalloc(sizeof(struct pattern_entry));
|
|
|
|
|
2020-01-31 20:16:09 +00:00
|
|
|
translated->pattern = dup_and_filter_pattern(given->pattern);
|
2019-11-21 22:04:41 +00:00
|
|
|
translated->patternlen = given->patternlen;
|
|
|
|
hashmap_entry_init(&translated->ent,
|
2021-07-30 19:06:58 +00:00
|
|
|
fspathhash(translated->pattern));
|
2019-11-21 22:04:41 +00:00
|
|
|
|
|
|
|
hashmap_add(&pl->recursive_hashmap, &translated->ent);
|
|
|
|
|
|
|
|
if (hashmap_get_entry(&pl->parent_hashmap, translated, ent, NULL)) {
|
|
|
|
/* we already included this at the parent level */
|
|
|
|
warning(_("your sparse-checkout file may have issues: pattern '%s' is repeated"),
|
|
|
|
given->pattern);
|
2021-12-16 16:13:40 +00:00
|
|
|
goto clear_hashmaps;
|
2019-11-21 22:04:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
clear_hashmaps:
|
|
|
|
warning(_("disabling cone pattern matching"));
|
2020-11-02 18:55:05 +00:00
|
|
|
hashmap_clear_and_free(&pl->parent_hashmap, struct pattern_entry, ent);
|
|
|
|
hashmap_clear_and_free(&pl->recursive_hashmap, struct pattern_entry, ent);
|
2019-11-21 22:04:41 +00:00
|
|
|
pl->use_cone_patterns = 0;
|
|
|
|
}
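A hedged worked example (hypothetical sparse-checkout contents, roughly what "git sparse-checkout set A/B" would write) of how the function above classifies patterns:
/*
!/*/
/A/
!/A/*/
/A/B/
The first pattern sets pl->full_cone and the second clears it again; "/A/" is added to the recursive hashmap; "!/A/*/" moves "/A" from the recursive hashmap into the parent hashmap; "/A/B/" stays in the recursive hashmap. The net effect is that only the immediate entries of "A" are wanted, while "A/B" is included recursively.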
|
|
|
|
|
|
|
|
static int hashmap_contains_path(struct hashmap *map,
|
|
|
|
struct strbuf *pattern)
|
|
|
|
{
|
|
|
|
struct pattern_entry p;
|
|
|
|
|
|
|
|
/* Check straight mapping */
|
|
|
|
p.pattern = pattern->buf;
|
|
|
|
p.patternlen = pattern->len;
|
2021-07-30 19:06:58 +00:00
|
|
|
hashmap_entry_init(&p.ent, fspathhash(p.pattern));
|
2019-11-21 22:04:41 +00:00
|
|
|
return !!hashmap_get_entry(map, &p, ent, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
int hashmap_contains_parent(struct hashmap *map,
|
|
|
|
const char *path,
|
|
|
|
struct strbuf *buffer)
|
|
|
|
{
|
|
|
|
char *slash_pos;
|
|
|
|
|
|
|
|
strbuf_setlen(buffer, 0);
|
|
|
|
|
|
|
|
if (path[0] != '/')
|
|
|
|
strbuf_addch(buffer, '/');
|
|
|
|
|
|
|
|
strbuf_addstr(buffer, path);
|
|
|
|
|
|
|
|
slash_pos = strrchr(buffer->buf, '/');
|
|
|
|
|
|
|
|
while (slash_pos > buffer->buf) {
|
|
|
|
strbuf_setlen(buffer, slash_pos - buffer->buf);
|
|
|
|
|
|
|
|
if (hashmap_contains_path(map, buffer))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
slash_pos = strrchr(buffer->buf, '/');
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
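/*
 * Illustrative behaviour (hypothetical map contents): if the map holds
 * "/A" and "/A/B", then hashmap_contains_parent(map, "A/B/C/file.h", &buf)
 * checks "/A/B/C", then "/A/B", and returns 1, while a path like
 * "X/file.h" only gets "/X" checked and returns 0.
 */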
|
|
|
|
|
2019-09-03 18:04:57 +00:00
|
|
|
void add_pattern(const char *string, const char *base,
|
2019-09-03 18:04:56 +00:00
|
|
|
int baselen, struct pattern_list *pl, int srcpos)
|
2006-05-17 02:02:14 +00:00
|
|
|
{
|
2019-09-03 18:04:55 +00:00
|
|
|
struct path_pattern *pattern;
|
2012-10-15 06:24:38 +00:00
|
|
|
int patternlen;
|
2016-03-01 17:02:59 +00:00
|
|
|
unsigned flags;
|
2012-10-15 06:24:38 +00:00
|
|
|
int nowildcardlen;
|
2006-05-17 02:02:14 +00:00
|
|
|
|
2019-09-03 18:04:57 +00:00
|
|
|
parse_path_pattern(&string, &patternlen, &flags, &nowildcardlen);
|
2019-09-03 18:04:56 +00:00
|
|
|
if (flags & PATTERN_FLAG_MUSTBEDIR) {
|
2019-09-03 18:04:55 +00:00
|
|
|
FLEXPTR_ALLOC_MEM(pattern, pattern, string, patternlen);
|
2008-01-31 09:17:48 +00:00
|
|
|
} else {
|
2019-09-03 18:04:55 +00:00
|
|
|
pattern = xmalloc(sizeof(*pattern));
|
|
|
|
pattern->pattern = string;
|
2008-01-31 09:17:48 +00:00
|
|
|
}
|
2019-09-03 18:04:55 +00:00
|
|
|
pattern->patternlen = patternlen;
|
|
|
|
pattern->nowildcardlen = nowildcardlen;
|
|
|
|
pattern->base = base;
|
|
|
|
pattern->baselen = baselen;
|
|
|
|
pattern->flags = flags;
|
|
|
|
pattern->srcpos = srcpos;
|
2019-09-03 18:04:56 +00:00
|
|
|
ALLOC_GROW(pl->patterns, pl->nr + 1, pl->alloc);
|
|
|
|
pl->patterns[pl->nr++] = pattern;
|
|
|
|
pattern->pl = pl;
|
2019-11-21 22:04:41 +00:00
|
|
|
|
|
|
|
add_pattern_to_hashsets(pl, pattern);
|
2006-05-17 02:02:14 +00:00
|
|
|
}
|
|
|
|
|
2021-04-01 01:49:39 +00:00
|
|
|
static int read_skip_worktree_file_from_index(struct index_state *istate,
|
2017-11-21 20:58:47 +00:00
|
|
|
const char *path,
|
2018-01-28 00:13:12 +00:00
|
|
|
size_t *size_out, char **data_out,
|
|
|
|
struct oid_stat *oid_stat)
|
2009-08-20 13:47:01 +00:00
|
|
|
{
|
|
|
|
int pos, len;
|
|
|
|
|
|
|
|
len = strlen(path);
|
2017-05-05 19:53:22 +00:00
|
|
|
pos = index_name_pos(istate, path, len);
|
2009-08-20 13:47:01 +00:00
|
|
|
if (pos < 0)
|
2017-11-21 20:58:47 +00:00
|
|
|
return -1;
|
2017-05-05 19:53:22 +00:00
|
|
|
if (!ce_skip_worktree(istate->cache[pos]))
|
2017-11-21 20:58:47 +00:00
|
|
|
return -1;
|
|
|
|
|
2018-01-28 00:13:12 +00:00
|
|
|
return do_read_blob(&istate->cache[pos]->oid, oid_stat, size_out, data_out);
|
2009-08-20 13:47:01 +00:00
|
|
|
}
|
|
|
|
|
2012-12-27 02:32:29 +00:00
|
|
|
/*
|
2019-09-03 18:04:56 +00:00
|
|
|
* Frees memory within pl which was allocated for exclude patterns and
|
|
|
|
* the file buffer. Does not free pl itself.
|
2012-12-27 02:32:29 +00:00
|
|
|
*/
|
2019-09-03 18:04:57 +00:00
|
|
|
void clear_pattern_list(struct pattern_list *pl)
|
2010-11-26 18:17:44 +00:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2019-09-03 18:04:56 +00:00
|
|
|
for (i = 0; i < pl->nr; i++)
|
|
|
|
free(pl->patterns[i]);
|
|
|
|
free(pl->patterns);
|
|
|
|
free(pl->filebuf);
|
2020-11-02 18:55:05 +00:00
|
|
|
hashmap_clear_and_free(&pl->recursive_hashmap, struct pattern_entry, ent);
|
|
|
|
hashmap_clear_and_free(&pl->parent_hashmap, struct pattern_entry, ent);
|
2010-11-26 18:17:44 +00:00
|
|
|
|
2019-09-03 18:04:56 +00:00
|
|
|
memset(pl, 0, sizeof(*pl));
|
2010-11-26 18:17:44 +00:00
|
|
|
}
|
|
|
|
|
2014-02-09 00:26:38 +00:00
|
|
|
static void trim_trailing_spaces(char *buf)
|
2014-02-09 00:26:37 +00:00
|
|
|
{
|
2014-06-02 22:36:56 +00:00
|
|
|
char *p, *last_space = NULL;
|
|
|
|
|
|
|
|
for (p = buf; *p; p++)
|
|
|
|
switch (*p) {
|
|
|
|
case ' ':
|
|
|
|
if (!last_space)
|
|
|
|
last_space = p;
|
|
|
|
break;
|
|
|
|
case '\\':
|
|
|
|
p++;
|
|
|
|
if (!*p)
|
|
|
|
return;
|
|
|
|
/* fallthrough */
|
|
|
|
default:
|
|
|
|
last_space = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (last_space)
|
|
|
|
*last_space = '\0';
|
2014-02-09 00:26:37 +00:00
|
|
|
}
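/*
 * Examples (hedged reading of the loop above):
 *   "foo   "      becomes "foo"        (unescaped trailing spaces dropped)
 *   "foo\ "       is left unchanged    (escaped trailing space is kept)
 *   "foo \ bar  " becomes "foo \ bar"
 */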
|
|
|
|
|
untracked cache: record .gitignore information and dir hierarchy
The idea is if we can capture all input and (non-recursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.
The requirement for this to work is stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of input of r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on stat info of the directory in question, all .gitignore leading to
it and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.
Two hash_sha1_file() calls are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified,
otherwise their SHA-1 in index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them if their content is different from index, but I think
.gitignore files are rarely modified, so not worth extra cache data
(and the hashing penalty in read-cache.c:verify_hdr(), as we will be storing
this as an index extension).
The implication is, if you change .gitignore, you better add it to the
index soon or you lose all the benefit of untracked cache because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for .gitignore at root.
This cached output is about untracked files only, not ignored files
because the number of tracked files is usually small, so small cache
overhead, while the number of ignored files could go really high
(e.g. *.o files mixing with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:25 +00:00
|
|
|
/*
|
|
|
|
* Given a subdirectory name and "dir" of the current directory,
|
|
|
|
* search the subdir in "dir" and return it, or create a new one if it
|
|
|
|
* does not exist in "dir".
|
|
|
|
*
|
|
|
|
* If "name" has the trailing slash, it'll be excluded in the search.
|
|
|
|
*/
|
|
|
|
static struct untracked_cache_dir *lookup_untracked(struct untracked_cache *uc,
|
|
|
|
struct untracked_cache_dir *dir,
|
|
|
|
const char *name, int len)
|
|
|
|
{
|
|
|
|
int first, last;
|
|
|
|
struct untracked_cache_dir *d;
|
|
|
|
if (!dir)
|
|
|
|
return NULL;
|
|
|
|
if (len && name[len - 1] == '/')
|
|
|
|
len--;
|
|
|
|
first = 0;
|
|
|
|
last = dir->dirs_nr;
|
|
|
|
while (last > first) {
|
2019-06-13 17:51:56 +00:00
|
|
|
int cmp, next = first + ((last - first) >> 1);
|
2015-03-08 10:12:25 +00:00
|
|
|
d = dir->dirs[next];
|
|
|
|
cmp = strncmp(name, d->name, len);
|
|
|
|
if (!cmp && strlen(d->name) > len)
|
|
|
|
cmp = -1;
|
|
|
|
if (!cmp)
|
|
|
|
return d;
|
|
|
|
if (cmp < 0) {
|
|
|
|
last = next;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
first = next+1;
|
|
|
|
}
|
|
|
|
|
|
|
|
uc->dir_created++;
|
2016-02-22 22:44:32 +00:00
|
|
|
FLEX_ALLOC_MEM(d, name, name, len);
|
2015-03-08 10:12:25 +00:00
|
|
|
|
|
|
|
ALLOC_GROW(dir->dirs, dir->dirs_nr + 1, dir->dirs_alloc);
|
2018-01-22 17:50:09 +00:00
|
|
|
MOVE_ARRAY(dir->dirs + first + 1, dir->dirs + first,
|
|
|
|
dir->dirs_nr - first);
|
2015-03-08 10:12:25 +00:00
|
|
|
dir->dirs_nr++;
|
|
|
|
dir->dirs[first] = d;
|
|
|
|
return d;
|
|
|
|
}
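/*
 * Illustrative call (hedged): lookup_untracked(uc, root, "src/", 4)
 * ignores the trailing '/', binary-searches root->dirs for "src", and
 * allocates and inserts a new entry (bumping uc->dir_created) if it is
 * not there yet.
 */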
|
|
|
|
|
2015-03-08 10:12:26 +00:00
|
|
|
static void do_invalidate_gitignore(struct untracked_cache_dir *dir)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
dir->valid = 0;
|
|
|
|
dir->untracked_nr = 0;
|
|
|
|
for (i = 0; i < dir->dirs_nr; i++)
|
|
|
|
do_invalidate_gitignore(dir->dirs[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void invalidate_gitignore(struct untracked_cache *uc,
|
|
|
|
struct untracked_cache_dir *dir)
|
|
|
|
{
|
|
|
|
uc->gitignore_invalidated++;
|
|
|
|
do_invalidate_gitignore(dir);
|
|
|
|
}
|
|
|
|
|
untracked cache: record/validate dir mtime and reuse cached output
The main readdir loop in read_directory_recursive() is replaced with a
new one that checks if cached results of a directory is still valid.
If a file is added or removed from the index, the containing directory
is invalidated (but not its subdirs). If directory's mtime is changed,
the same happens. If a .gitignore is updated, the containing directory
and all subdirs are invalidated recursively. If dir_struct#flags or
other conditions change, the cache is ignored.
If a directory is invalidated, we opendir/readdir/closedir and run the
exclude machinery on that directory listing as usual. If untracked
cache is also enabled, we'll update the cache along the way. If a
directory is validated, we simply pull the untracked listing out from
the cache. The cache also records the list of direct subdirs that we
have to recurse in. Fully excluded directories are seen as "untracked
files".
In the best case when no dirs are invalidated, read_directory()
becomes a series of
stat(dir), open(.gitignore), fstat(), read(), close() and optionally
hash_sha1_file()
For comparison, standard read_directory() is a sequence of
opendir(), readdir(), open(.gitignore), fstat(), read(), close(), the
expensive last_exclude_matching() and closedir().
We already try not to open(.gitignore) if we know it does not exist,
so open/fstat/read/close sequence does not apply to every
directory. The sequence could be reduced further, as noted in
prep_exclude() in another patch. So in theory, the entire best-case
read_directory sequence could be reduced to a series of stat() and
nothing else.
This is not a silver bullet approach. When you compile a C file, for
example, the old .o file is removed and a new one with the same name
created, effectively invalidating the containing directory's cache
(but not its subdirectories). If your build process touches every
directory, this cache adds extra overhead for nothing, so it's a good
idea to separate generated files from tracked files. Editors may use
the same strategy for saving files. And of course you're out of luck
running your repo on an unsupported filesystem and/or operating system.
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:29 +00:00
|
|
|
static void invalidate_directory(struct untracked_cache *uc,
|
|
|
|
struct untracked_cache_dir *dir)
|
|
|
|
{
|
2015-03-08 10:12:30 +00:00
|
|
|
int i;
|
dir.c: fix missing dir invalidation in untracked code
Let's start with how we create a new directory cache after the last one
becomes invalid (e.g. because its dir mtime has changed...). In
open_cached_dir():
1. We start out with valid_cached_dir() returning false, which should
call invalidate_directory() to put a directory state back to
initial state, no untracked entries (untracked_nr zero), no sub
directory traversal (dirs[].recurse zero).
2. Since the cache cannot be used, we go the slow path opendir() and
go through items one by one via readdir(). All the directories on
disk will be added back to the cache (if they do not already exist in
dirs[]) and their "recurse" flag gets set to one to note that
they are part of the cached dir traversal next time.
3. By the time we reach close_cached_dir() we should have a good
subdir list in dirs[]. Those with "recurse" flag set are the ones
present in the on-disk directory. The directory is now marked
"valid".
Next time read_directory() is called, since the directory is marked
valid, it will skip readdir(), go fast path and traverse through
dirs[] array instead.
Steps one and two need some tight cooperation. If a subdir is removed,
readdir() will not find it and of course we cannot examine/invalidate
it. To make sure removed directories on disk are gone from the cache,
step one must make sure recurse flag of all subdirs are zero.
But that's not true. If "valid" flag is already false, there is a
chance we go straight to the end of valid_cached_dir() without calling
invalidate_directory(). Or we fail to meet the "if (untracked->valid)"
condition and skip over the invalidate_directory().
After step 3, we mark the cache valid. Any stale subdir with incorrect
recurse flag becomes a real subdir next time we traverse the directory
using dirs[] array.
We could avoid this by making sure invalidate_directory() is always
called (therefore dirs[].recurse cleared) at the beginning of
open_cached_dir(). Which is what this patch does.
As to how we get into this situation, the key in the test is this
command
git checkout master
where "one/file" is replaced with "one" in the index. This index
update triggers untracked_cache_invalidate_path(), which clears valid
flag of the root directory while keeping "recurse" flag on the subdir
"one" on. On the next git-status, we go through steps 1-3 above and
save an incorrect cache on disk. The second git-status blindly follows
the bad cache data and shows the problem.
This is arguably because of a bad design where "recurse" flag plays
double roles: whether a directory should be saved on disk, and whether
it is part of a directory traversal.
We need to keep recurse flag set at "checkout master" because of the
first role: we need to keep subdir caches (dir "two" for example has
not been touched at all, no reason to throw its cache away).
As long as we make sure to ignore/reset "recurse" flag at the
beginning of a directory traversal, we're good. But maybe eventually
we should separate these two roles.
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-01-24 09:30:21 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Invalidation increment here is just roughly correct. If
|
|
|
|
* untracked_nr or any of dirs[].recurse is non-zero, we
|
|
|
|
* should increment dir_invalidated too. But that's more
|
|
|
|
* expensive to do.
|
|
|
|
*/
|
|
|
|
if (dir->valid)
|
|
|
|
uc->dir_invalidated++;
|
|
|
|
|
2015-03-08 10:12:29 +00:00
|
|
|
dir->valid = 0;
|
|
|
|
dir->untracked_nr = 0;
|
2015-03-08 10:12:30 +00:00
|
|
|
for (i = 0; i < dir->dirs_nr; i++)
|
|
|
|
dir->dirs[i]->recurse = 0;
|
2015-03-08 10:12:29 +00:00
|
|
|
}
|
|
|
|
|
2019-09-03 18:04:57 +00:00
|
|
|
static int add_patterns_from_buffer(char *buf, size_t size,
|
2017-11-21 20:58:47 +00:00
|
|
|
const char *base, int baselen,
|
2019-09-03 18:04:56 +00:00
|
|
|
struct pattern_list *pl);
|
2017-11-21 20:58:47 +00:00
|
|
|
|
2021-02-16 14:44:34 +00:00
|
|
|
/* Flags for add_patterns() */
|
|
|
|
#define PATTERN_NOFOLLOW (1<<0)
|
|
|
|
|
2015-03-08 10:12:24 +00:00
|
|
|
/*
|
|
|
|
* Given a file with name "fname", read it (either from disk, or from
|
2017-05-05 19:53:28 +00:00
|
|
|
* an index if 'istate' is non-null), parse it and store the
|
2019-09-03 18:04:56 +00:00
|
|
|
* exclude rules in "pl".
|
2015-03-08 10:12:24 +00:00
|
|
|
*
|
2020-10-15 16:28:36 +00:00
|
|
|
* If "oid_stat" is not NULL, compute oid of the exclude file and fill
|
2019-09-03 18:04:57 +00:00
|
|
|
* stat data from disk (only valid if add_patterns returns zero). If
|
2020-10-15 16:28:36 +00:00
|
|
|
* oid_stat.valid is non-zero, "oid_stat" must contain good value as input.
|
2015-03-08 10:12:24 +00:00
|
|
|
*/
|
2019-09-03 18:04:57 +00:00
|
|
|
static int add_patterns(const char *fname, const char *base, int baselen,
|
2019-09-03 18:04:56 +00:00
|
|
|
struct pattern_list *pl, struct index_state *istate,
|
2021-02-16 14:44:28 +00:00
|
|
|
unsigned flags, struct oid_stat *oid_stat)
|
2006-05-17 02:02:14 +00:00
|
|
|
{
|
2006-08-27 23:55:46 +00:00
|
|
|
struct stat st;
|
2017-11-21 20:58:47 +00:00
|
|
|
int r;
|
|
|
|
int fd;
|
2010-09-16 20:53:22 +00:00
|
|
|
size_t size = 0;
|
2017-11-21 20:58:47 +00:00
|
|
|
char *buf;
|
2006-05-17 02:02:14 +00:00
|
|
|
|
2021-02-16 14:44:34 +00:00
|
|
|
if (flags & PATTERN_NOFOLLOW)
|
|
|
|
fd = open_nofollow(fname, O_RDONLY);
|
|
|
|
else
|
|
|
|
fd = open(fname, O_RDONLY);
|
|
|
|
|
2009-08-20 13:47:01 +00:00
|
|
|
if (fd < 0 || fstat(fd, &st) < 0) {
|
2017-05-03 10:16:49 +00:00
|
|
|
if (fd < 0)
|
|
|
|
warn_on_fopen_errors(fname);
|
|
|
|
else
|
2009-08-20 13:47:01 +00:00
|
|
|
close(fd);
|
2017-11-21 20:58:47 +00:00
|
|
|
if (!istate)
|
2009-08-20 13:47:01 +00:00
|
|
|
return -1;
|
2017-11-21 20:58:47 +00:00
|
|
|
r = read_skip_worktree_file_from_index(istate, fname,
|
|
|
|
&size, &buf,
|
2018-01-28 00:13:12 +00:00
|
|
|
oid_stat);
|
2017-11-21 20:58:47 +00:00
|
|
|
if (r != 1)
|
|
|
|
return r;
|
2014-07-14 09:47:11 +00:00
|
|
|
} else {
|
2009-08-20 13:47:01 +00:00
|
|
|
size = xsize_t(st.st_size);
|
|
|
|
if (size == 0) {
|
2018-01-28 00:13:12 +00:00
|
|
|
if (oid_stat) {
|
|
|
|
fill_stat_data(&oid_stat->stat, &st);
|
2018-05-02 00:26:06 +00:00
|
|
|
oidcpy(&oid_stat->oid, the_hash_algo->empty_blob);
|
2018-01-28 00:13:12 +00:00
|
|
|
oid_stat->valid = 1;
|
2015-03-08 10:12:24 +00:00
|
|
|
}
|
2009-08-20 13:47:01 +00:00
|
|
|
close(fd);
|
|
|
|
return 0;
|
|
|
|
}
|
2016-02-22 22:44:28 +00:00
|
|
|
buf = xmallocz(size);
|
2009-08-20 13:47:01 +00:00
|
|
|
if (read_in_full(fd, buf, size) != size) {
|
2010-01-20 14:09:16 +00:00
|
|
|
free(buf);
|
2009-08-20 13:47:01 +00:00
|
|
|
close(fd);
|
|
|
|
return -1;
|
|
|
|
}
|
2010-01-20 14:09:16 +00:00
|
|
|
buf[size++] = '\n';
|
2009-08-20 13:47:01 +00:00
|
|
|
close(fd);
|
2018-01-28 00:13:12 +00:00
|
|
|
if (oid_stat) {
|
2015-03-08 10:12:24 +00:00
|
|
|
int pos;
|
2018-01-28 00:13:12 +00:00
|
|
|
if (oid_stat->valid &&
|
|
|
|
!match_stat_data_racy(istate, &oid_stat->stat, &st))
|
2020-10-15 16:28:36 +00:00
|
|
|
; /* no content change, oid_stat->oid still good */
|
2017-05-05 19:53:28 +00:00
|
|
|
else if (istate &&
|
|
|
|
(pos = index_name_pos(istate, fname, strlen(fname))) >= 0 &&
|
|
|
|
!ce_stage(istate->cache[pos]) &&
|
|
|
|
ce_uptodate(istate->cache[pos]) &&
|
2017-06-12 22:13:55 +00:00
|
|
|
!would_convert_to_git(istate, fname))
|
2018-01-28 00:13:12 +00:00
|
|
|
oidcpy(&oid_stat->oid,
|
|
|
|
&istate->cache[pos]->oid);
|
2015-03-08 10:12:24 +00:00
|
|
|
else
|
2020-01-30 20:32:22 +00:00
|
|
|
hash_object_file(the_hash_algo, buf, size,
|
2022-02-04 23:48:32 +00:00
|
|
|
OBJ_BLOB, &oid_stat->oid);
|
2018-01-28 00:13:12 +00:00
|
|
|
fill_stat_data(&oid_stat->stat, &st);
|
|
|
|
oid_stat->valid = 1;
|
2015-03-08 10:12:24 +00:00
|
|
|
}
|
2007-12-16 04:53:26 +00:00
|
|
|
}
|
2006-05-17 02:02:14 +00:00
|
|
|
|
2019-09-03 18:04:57 +00:00
|
|
|
add_patterns_from_buffer(buf, size, base, baselen, pl);
|
2017-11-21 20:58:47 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-09-03 18:04:57 +00:00
|
|
|
static int add_patterns_from_buffer(char *buf, size_t size,
|
2017-11-21 20:58:47 +00:00
|
|
|
const char *base, int baselen,
|
2019-09-03 18:04:56 +00:00
|
|
|
struct pattern_list *pl)
|
2017-11-21 20:58:47 +00:00
|
|
|
{
|
|
|
|
int i, lineno = 1;
|
|
|
|
char *entry;
|
|
|
|
|
2019-11-21 22:04:41 +00:00
|
|
|
hashmap_init(&pl->recursive_hashmap, pl_hashmap_cmp, NULL, 0);
|
|
|
|
hashmap_init(&pl->parent_hashmap, pl_hashmap_cmp, NULL, 0);
|
|
|
|
|
2019-09-03 18:04:56 +00:00
|
|
|
pl->filebuf = buf;
|
2015-04-16 14:05:12 +00:00
|
|
|
|
2015-04-16 17:45:29 +00:00
|
|
|
if (skip_utf8_bom(&buf, size))
|
2019-09-03 18:04:56 +00:00
|
|
|
size -= buf - pl->filebuf;
|
2015-04-16 17:45:29 +00:00
|
|
|
|
2006-05-17 02:02:14 +00:00
|
|
|
entry = buf;
|
2015-04-16 14:05:12 +00:00
|
|
|
|
2010-01-20 14:09:16 +00:00
|
|
|
for (i = 0; i < size; i++) {
|
|
|
|
if (buf[i] == '\n') {
|
2006-05-17 02:02:14 +00:00
|
|
|
if (entry != buf + i && entry[0] != '#') {
|
|
|
|
buf[i - (i && buf[i-1] == '\r')] = 0;
|
2014-02-09 00:26:38 +00:00
|
|
|
trim_trailing_spaces(entry);
|
2019-09-03 18:04:57 +00:00
|
|
|
add_pattern(entry, base, baselen, pl, lineno);
|
2006-05-17 02:02:14 +00:00
|
|
|
}
|
2013-01-06 16:58:04 +00:00
|
|
|
lineno++;
|
2006-05-17 02:02:14 +00:00
|
|
|
entry = buf + i + 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-09-03 18:04:57 +00:00
|
|
|
int add_patterns_from_file_to_list(const char *fname, const char *base,
|
2019-09-03 18:04:56 +00:00
|
|
|
int baselen, struct pattern_list *pl,
|
2021-02-16 14:44:28 +00:00
|
|
|
struct index_state *istate,
|
|
|
|
unsigned flags)
|
2015-03-08 10:12:24 +00:00
|
|
|
{
|
2021-02-16 14:44:28 +00:00
|
|
|
return add_patterns(fname, base, baselen, pl, istate, flags, NULL);
|
2015-03-08 10:12:24 +00:00
|
|
|
}
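A hedged caller sketch (hypothetical helper) for the entry point above: zero a pattern_list, load a file of exclude rules into it, and release it later with clear_pattern_list(). Passing PATTERN_NOFOLLOW instead of 0 would refuse a symlinked pattern file, as handled in add_patterns().
static int load_ignore_rules(struct pattern_list *pl, const char *path,
			     struct index_state *istate)
{
	memset(pl, 0, sizeof(*pl));
	/* flags == 0: follow symlinks; no oid/stat bookkeeping is requested */
	return add_patterns_from_file_to_list(path, "", 0, pl, istate, 0);
}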
|
|
|
|
|
2019-09-03 18:04:57 +00:00
|
|
|
int add_patterns_from_blob_to_list(
|
2017-11-21 20:58:47 +00:00
|
|
|
struct object_id *oid,
|
|
|
|
const char *base, int baselen,
|
2019-09-03 18:04:56 +00:00
|
|
|
struct pattern_list *pl)
|
2017-11-21 20:58:47 +00:00
|
|
|
{
|
|
|
|
char *buf;
|
|
|
|
size_t size;
|
|
|
|
int r;
|
|
|
|
|
|
|
|
r = do_read_blob(oid, NULL, &size, &buf);
|
|
|
|
if (r != 1)
|
|
|
|
return r;
|
|
|
|
|
2019-09-03 18:04:57 +00:00
|
|
|
add_patterns_from_buffer(buf, size, base, baselen, pl);
|
2017-11-21 20:58:47 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-09-03 18:04:57 +00:00
|
|
|
struct pattern_list *add_pattern_list(struct dir_struct *dir,
|
2013-01-06 16:58:04 +00:00
|
|
|
int group_type, const char *src)
|
2013-01-06 16:58:03 +00:00
|
|
|
{
|
2019-09-03 18:04:56 +00:00
|
|
|
struct pattern_list *pl;
|
2013-01-06 16:58:03 +00:00
|
|
|
struct exclude_list_group *group;
|
|
|
|
|
2023-02-27 15:28:10 +00:00
|
|
|
group = &dir->internal.exclude_list_group[group_type];
|
2019-09-03 18:04:56 +00:00
|
|
|
ALLOC_GROW(group->pl, group->nr + 1, group->alloc);
|
|
|
|
pl = &group->pl[group->nr++];
|
|
|
|
memset(pl, 0, sizeof(*pl));
|
|
|
|
pl->src = src;
|
|
|
|
return pl;
|
2013-01-06 16:58:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Used to set up core.excludesfile and .git/info/exclude lists.
|
|
|
|
*/
|
2019-09-03 18:04:57 +00:00
|
|
|
static void add_patterns_from_file_1(struct dir_struct *dir, const char *fname,
|
2018-01-28 00:13:12 +00:00
|
|
|
struct oid_stat *oid_stat)
|
2006-05-17 02:02:14 +00:00
|
|
|
{
|
2019-09-03 18:04:56 +00:00
|
|
|
struct pattern_list *pl;
|
2015-03-08 10:12:26 +00:00
|
|
|
/*
|
|
|
|
* catch setup_standard_excludes() that's called before
|
|
|
|
* dir->untracked is assigned. That function behaves
|
|
|
|
* differently when dir->untracked is non-NULL.
|
|
|
|
*/
|
|
|
|
if (!dir->untracked)
|
2023-02-27 15:28:10 +00:00
|
|
|
dir->internal.unmanaged_exclude_files++;
|
2019-09-03 18:04:57 +00:00
|
|
|
pl = add_pattern_list(dir, EXC_FILE, fname);
|
2021-02-16 14:44:28 +00:00
|
|
|
if (add_patterns(fname, "", 0, pl, NULL, 0, oid_stat) < 0)
|
2018-07-21 07:49:30 +00:00
|
|
|
die(_("cannot use %s as an exclude file"), fname);
|
2006-05-17 02:02:14 +00:00
|
|
|
}
|
|
|
|
|
2019-09-03 18:04:57 +00:00
|
|
|
void add_patterns_from_file(struct dir_struct *dir, const char *fname)
|
2015-03-08 10:12:25 +00:00
|
|
|
{
|
2023-02-27 15:28:10 +00:00
|
|
|
dir->internal.unmanaged_exclude_files++; /* see validate_untracked_cache() */
|
2019-09-03 18:04:57 +00:00
|
|
|
add_patterns_from_file_1(dir, fname, NULL);
|
2015-03-08 10:12:25 +00:00
|
|
|
}
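To make the recording scheme from the commit message above concrete, here is a minimal sketch of the kind of per-directory record it implies. This is a hypothetical illustration only; the field names and layout are assumptions, not the actual struct untracked_cache_dir from dir.h.

/*
 * Hypothetical sketch: roughly what has to be remembered per directory
 * so a later read_directory_recursive() can be skipped when none of
 * its inputs changed.
 */
struct cached_dir_sketch {
	struct stat_data stat_data;   /* validity key for the directory itself */
	struct object_id exclude_oid; /* hash of the .gitignore seen last time */
	unsigned int check_only : 1;  /* the flag r_d_r() was called with */
	unsigned int valid : 1;       /* set while stat_data still matches */
	char **untracked;             /* cached output: untracked entries */
	size_t untracked_nr;
};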
|
|
|
|
|
2012-10-15 06:24:39 +00:00
|
|
|
int match_basename(const char *basename, int basenamelen,
|
|
|
|
const char *pattern, int prefix, int patternlen,
|
2016-03-01 17:02:59 +00:00
|
|
|
unsigned flags)
|
2012-10-15 06:24:35 +00:00
|
|
|
{
|
|
|
|
if (prefix == patternlen) {
|
dir.c::match_basename(): pay attention to the length of string parameters
The function takes two counted strings (<basename, basenamelen> and
<pattern, patternlen>) as parameters, together with prefix (the
length of the prefix in pattern that is to be matched literally
without globbing against the basename) and EXC_* flags that tell it
how to match the pattern against the basename.
However, it did not pay attention to the length of these counted
strings. Update it to do the following:
* When the entire pattern is to be matched literally, the pattern
matches the basename only when their lengths are the same, and
they match up to that length.
* When the pattern is "*" followed by a string to be matched
literally, make sure that basenamelen is equal to or longer than
the "literal" part of the pattern, and that the tail of the basename
string matches that literal part.
* Otherwise, use the new fnmatch_icase_mem helper to make sure we
use only the counted part of the strings. Because these counted
strings are full strings most of the time, we check for termination
to avoid unnecessary allocation.
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-03-28 21:47:28 +00:00
|
|
|
if (patternlen == basenamelen &&
|
2016-04-22 13:01:24 +00:00
|
|
|
!fspathncmp(pattern, basename, basenamelen))
|
2012-10-15 06:24:35 +00:00
|
|
|
return 1;
|
2019-09-03 18:04:56 +00:00
|
|
|
} else if (flags & PATTERN_FLAG_ENDSWITH) {
|
dir.c::match_basename(): pay attention to the length of string parameters
2013-03-28 21:47:28 +00:00
|
|
|
/* "*literal" matching against "fooliteral" */
|
2012-10-15 06:24:35 +00:00
|
|
|
if (patternlen - 1 <= basenamelen &&
|
2016-04-22 13:01:24 +00:00
|
|
|
!fspathncmp(pattern + 1,
|
dir.c::match_basename(): pay attention to the length of string parameters
2013-03-28 21:47:28 +00:00
|
|
|
basename + basenamelen - (patternlen - 1),
|
|
|
|
patternlen - 1))
|
2012-10-15 06:24:35 +00:00
|
|
|
return 1;
|
|
|
|
} else {
|
dir.c::match_basename(): pay attention to the length of string parameters
2013-03-28 21:47:28 +00:00
|
|
|
if (fnmatch_icase_mem(pattern, patternlen,
|
|
|
|
basename, basenamelen,
|
|
|
|
0) == 0)
|
2012-10-15 06:24:35 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
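As a quick check of the three branches above, a hedged sketch of calls a test might make follows; the pattern values, prefix lengths and PATTERN_FLAG_ENDSWITH usage are chosen for illustration and are not taken from any existing test.

/* Illustrative calls, one per branch of match_basename() above. */
static void match_basename_examples(void)
{
	/* whole pattern is literal: prefix == patternlen */
	assert(match_basename("Makefile", 8, "Makefile", 8, 8, 0));

	/* "*literal": only the tail of the basename is compared */
	assert(match_basename("foo.o", 5, "*.o", 0, 3, PATTERN_FLAG_ENDSWITH));

	/* general glob: falls through to fnmatch_icase_mem() */
	assert(match_basename("foo.c", 5, "f*.c", 1, 4, 0));
}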
|
|
|
|
|
2012-10-15 06:24:39 +00:00
|
|
|
int match_pathname(const char *pathname, int pathlen,
|
|
|
|
const char *base, int baselen,
|
2022-08-19 08:50:54 +00:00
|
|
|
const char *pattern, int prefix, int patternlen)
|
2012-10-15 06:24:37 +00:00
|
|
|
{
|
|
|
|
const char *name;
|
|
|
|
int namelen;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* match with FNM_PATHNAME; the pattern has base implicitly
|
|
|
|
* in front of it.
|
|
|
|
*/
|
|
|
|
if (*pattern == '/') {
|
|
|
|
pattern++;
|
2013-03-28 21:47:47 +00:00
|
|
|
patternlen--;
|
2012-10-15 06:24:37 +00:00
|
|
|
prefix--;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* baselen does not count the trailing slash. base[] may or
|
|
|
|
* may not end with a trailing slash though.
|
|
|
|
*/
|
|
|
|
if (pathlen < baselen + 1 ||
|
|
|
|
(baselen && pathname[baselen] != '/') ||
|
2016-04-22 13:01:24 +00:00
|
|
|
fspathncmp(pathname, base, baselen))
|
2012-10-15 06:24:37 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
namelen = baselen ? pathlen - baselen - 1 : pathlen;
|
|
|
|
name = pathname + pathlen - namelen;
|
|
|
|
|
|
|
|
if (prefix) {
|
|
|
|
/*
|
|
|
|
* if the non-wildcard part is longer than the
|
|
|
|
* remaining pathname, surely it cannot match.
|
|
|
|
*/
|
|
|
|
if (prefix > namelen)
|
|
|
|
return 0;
|
|
|
|
|
2016-04-22 13:01:24 +00:00
|
|
|
if (fspathncmp(pattern, name, prefix))
|
2012-10-15 06:24:37 +00:00
|
|
|
return 0;
|
|
|
|
pattern += prefix;
|
2013-03-28 21:48:21 +00:00
|
|
|
patternlen -= prefix;
|
2012-10-15 06:24:37 +00:00
|
|
|
name += prefix;
|
|
|
|
namelen -= prefix;
|
2013-03-28 21:48:21 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* If the whole pattern did not have a wildcard,
|
|
|
|
* then our prefix match is all we need; we
|
|
|
|
* do not need to call fnmatch at all.
|
|
|
|
*/
|
2021-11-02 14:40:06 +00:00
|
|
|
if (!patternlen && !namelen)
|
2013-03-28 21:48:21 +00:00
|
|
|
return 1;
|
2012-10-15 06:24:37 +00:00
|
|
|
}
|
|
|
|
|
2013-03-28 21:48:21 +00:00
|
|
|
return fnmatch_icase_mem(pattern, patternlen,
|
|
|
|
name, namelen,
|
2013-04-03 16:34:04 +00:00
|
|
|
WM_PATHNAME) == 0;
|
2012-10-15 06:24:37 +00:00
|
|
|
}
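A worked example may help here: suppose a .gitignore in "sub/" contains the pattern "obj/*.o", so base is "sub" (baselen 3, not counting the slash), the non-wildcard prefix is "obj/" (prefix 4), and we test the path "sub/obj/foo.o". The sketch below is illustrative only; the values are assumptions made up for the example.

/* Illustration: pattern "obj/*.o" from sub/.gitignore vs. "sub/obj/foo.o". */
static void match_pathname_example(void)
{
	const char *path = "sub/obj/foo.o";
	const char *pattern = "obj/*.o";

	/* prefix 4 covers the literal "obj/" before the first wildcard */
	assert(match_pathname(path, strlen(path),
			      "sub", 3,
			      pattern, 4, strlen(pattern)));
}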
|
|
|
|
|
2012-12-27 02:32:26 +00:00
|
|
|
/*
|
|
|
|
* Scan the given exclude list in reverse to see whether pathname
|
|
|
|
* should be ignored. The first match (i.e. the last on the list), if
|
|
|
|
* any, determines the fate. Returns the path_pattern which
|
|
|
|
* matched, or NULL for undecided.
|
2006-05-17 02:02:14 +00:00
|
|
|
*/
|
2019-09-03 18:04:57 +00:00
|
|
|
static struct path_pattern *last_matching_pattern_from_list(const char *pathname,
|
2012-12-27 02:32:26 +00:00
|
|
|
int pathlen,
|
|
|
|
const char *basename,
|
|
|
|
int *dtype,
|
2019-09-03 18:04:56 +00:00
|
|
|
struct pattern_list *pl,
|
2017-05-05 19:53:26 +00:00
|
|
|
struct index_state *istate)
|
2006-05-17 02:02:14 +00:00
|
|
|
{
|
2019-09-03 18:04:55 +00:00
|
|
|
struct path_pattern *res = NULL; /* undecided */
|
2016-03-18 18:06:15 +00:00
|
|
|
int i;
|
2006-05-17 02:02:14 +00:00
|
|
|
|
2019-09-03 18:04:56 +00:00
|
|
|
if (!pl->nr)
|
2012-12-27 02:32:26 +00:00
|
|
|
return NULL; /* undefined */
|
2008-01-31 09:17:48 +00:00
|
|
|
|
2019-09-03 18:04:56 +00:00
|
|
|
for (i = pl->nr - 1; 0 <= i; i--) {
|
|
|
|
struct path_pattern *pattern = pl->patterns[i];
|
2019-09-03 18:04:55 +00:00
|
|
|
const char *exclude = pattern->pattern;
|
|
|
|
int prefix = pattern->nowildcardlen;
|
2012-05-26 12:31:12 +00:00
|
|
|
|
2021-11-19 14:13:49 +00:00
|
|
|
if (pattern->flags & PATTERN_FLAG_MUSTBEDIR) {
|
|
|
|
*dtype = resolve_dtype(*dtype, istate, pathname, pathlen);
|
|
|
|
if (*dtype != DT_DIR)
|
|
|
|
continue;
|
|
|
|
}
|
2008-01-31 09:17:48 +00:00
|
|
|
|
2019-09-03 18:04:56 +00:00
|
|
|
if (pattern->flags & PATTERN_FLAG_NODIR) {
|
2012-10-15 06:24:35 +00:00
|
|
|
if (match_basename(basename,
|
|
|
|
pathlen - (basename - pathname),
|
2019-09-03 18:04:55 +00:00
|
|
|
exclude, prefix, pattern->patternlen,
|
|
|
|
pattern->flags)) {
|
|
|
|
res = pattern;
|
2015-09-21 09:56:14 +00:00
|
|
|
break;
|
|
|
|
}
|
2012-05-26 12:31:12 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2019-09-03 18:04:55 +00:00
|
|
|
assert(pattern->baselen == 0 ||
|
|
|
|
pattern->base[pattern->baselen - 1] == '/');
|
2012-10-15 06:24:37 +00:00
|
|
|
if (match_pathname(pathname, pathlen,
|
2019-09-03 18:04:55 +00:00
|
|
|
pattern->base,
|
|
|
|
pattern->baselen ? pattern->baselen - 1 : 0,
|
2022-08-19 08:50:54 +00:00
|
|
|
exclude, prefix, pattern->patternlen)) {
|
2019-09-03 18:04:55 +00:00
|
|
|
res = pattern;
|
2015-09-21 09:56:14 +00:00
|
|
|
break;
|
|
|
|
}
|
2006-05-17 02:02:14 +00:00
|
|
|
}
|
2019-09-03 18:04:55 +00:00
|
|
|
return res;
|
2012-12-27 02:32:26 +00:00
|
|
|
}
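To see why scanning in reverse matters, consider a hypothetical pattern list built from a .gitignore containing "*.log" followed by "!important.log". The sketch below assumes such a pattern_list and an index_state are already set up; it is an illustration, not an existing test.

/* Illustration: the last matching pattern decides, so later negations win. */
static void last_match_example(struct pattern_list *pl,
			       struct index_state *istate)
{
	int dtype = DT_REG;
	struct path_pattern *p;

	p = last_matching_pattern_from_list("important.log", 13,
					    "important.log", &dtype,
					    pl, istate);
	/* "!important.log" is hit first in the reverse scan ... */
	assert(p && (p->flags & PATTERN_FLAG_NEGATIVE));

	p = last_matching_pattern_from_list("debug.log", 9,
					    "debug.log", &dtype,
					    pl, istate);
	/* ... while "debug.log" falls through to "*.log" and is ignored */
	assert(p && !(p->flags & PATTERN_FLAG_NEGATIVE));
}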
|
|
|
|
|
|
|
|
/*
|
2019-09-03 18:04:58 +00:00
|
|
|
* Scan the list of patterns to determine if the ordered list
|
|
|
|
* of patterns matches on 'pathname'.
|
|
|
|
*
|
|
|
|
* Return 1 for a match, 0 for not matched and -1 for undecided.
|
2012-12-27 02:32:26 +00:00
|
|
|
*/
|
2019-09-03 18:04:58 +00:00
|
|
|
enum pattern_match_result path_matches_pattern_list(
|
|
|
|
const char *pathname, int pathlen,
|
|
|
|
const char *basename, int *dtype,
|
|
|
|
struct pattern_list *pl,
|
|
|
|
struct index_state *istate)
|
2012-12-27 02:32:26 +00:00
|
|
|
{
|
2019-09-03 18:04:55 +00:00
|
|
|
struct path_pattern *pattern;
|
2019-11-21 22:04:41 +00:00
|
|
|
struct strbuf parent_pathname = STRBUF_INIT;
|
|
|
|
int result = NOT_MATCHED;
|
2021-07-14 13:12:34 +00:00
|
|
|
size_t slash_pos;
|
2019-11-21 22:04:41 +00:00
|
|
|
|
|
|
|
if (!pl->use_cone_patterns) {
|
|
|
|
pattern = last_matching_pattern_from_list(pathname, pathlen, basename,
|
|
|
|
dtype, pl, istate);
|
|
|
|
if (pattern) {
|
|
|
|
if (pattern->flags & PATTERN_FLAG_NEGATIVE)
|
|
|
|
return NOT_MATCHED;
|
|
|
|
else
|
|
|
|
return MATCHED;
|
|
|
|
}
|
|
|
|
|
|
|
|
return UNDECIDED;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (pl->full_cone)
|
|
|
|
return MATCHED;
|
|
|
|
|
|
|
|
strbuf_addch(&parent_pathname, '/');
|
|
|
|
strbuf_add(&parent_pathname, pathname, pathlen);
|
|
|
|
|
2021-07-14 13:12:34 +00:00
|
|
|
/*
|
|
|
|
* Directory entries are matched if and only if a file
|
|
|
|
* contained immediately within them is matched. For the
|
|
|
|
* case of a directory entry, modify the path to create
|
|
|
|
* a fake filename within this directory, allowing us to
|
|
|
|
* use the file-base matching logic in an equivalent way.
|
|
|
|
*/
|
|
|
|
if (parent_pathname.len > 0 &&
|
|
|
|
parent_pathname.buf[parent_pathname.len - 1] == '/') {
|
|
|
|
slash_pos = parent_pathname.len - 1;
|
|
|
|
strbuf_add(&parent_pathname, "-", 1);
|
|
|
|
} else {
|
|
|
|
const char *slash_ptr = strrchr(parent_pathname.buf, '/');
|
|
|
|
slash_pos = slash_ptr ? slash_ptr - parent_pathname.buf : 0;
|
|
|
|
}
|
|
|
|
|
2019-11-21 22:04:41 +00:00
|
|
|
if (hashmap_contains_path(&pl->recursive_hashmap,
|
|
|
|
&parent_pathname)) {
|
2019-11-21 22:04:43 +00:00
|
|
|
result = MATCHED_RECURSIVE;
|
2019-11-21 22:04:41 +00:00
|
|
|
goto done;
|
2019-09-03 18:04:58 +00:00
|
|
|
}
|
|
|
|
|
2021-07-14 13:12:34 +00:00
|
|
|
if (!slash_pos) {
|
2019-11-21 22:04:41 +00:00
|
|
|
/* include every file in root */
|
|
|
|
result = MATCHED;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
2021-07-14 13:12:34 +00:00
|
|
|
strbuf_setlen(&parent_pathname, slash_pos);
|
2019-11-21 22:04:41 +00:00
|
|
|
|
|
|
|
if (hashmap_contains_path(&pl->parent_hashmap, &parent_pathname)) {
|
|
|
|
result = MATCHED;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (hashmap_contains_parent(&pl->recursive_hashmap,
|
|
|
|
pathname,
|
|
|
|
&parent_pathname))
|
2019-11-21 22:04:43 +00:00
|
|
|
result = MATCHED_RECURSIVE;
|
2019-11-21 22:04:41 +00:00
|
|
|
|
|
|
|
done:
|
|
|
|
strbuf_release(&parent_pathname);
|
|
|
|
return result;
|
2006-05-17 02:02:14 +00:00
|
|
|
}
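For the cone-mode branch above it can help to see the lookup keys that get built. The helper below is purely illustrative (it is not part of dir.c); it only mirrors the strbuf manipulation performed in path_matches_pattern_list().

/* Illustration of the cone-mode hashmap keys built above. */
static void cone_lookup_keys_example(void)
{
	struct strbuf key = STRBUF_INIT;

	/* a directory entry "a/b/" is probed as the fake file "/a/b/-" */
	strbuf_addstr(&key, "/a/b/");
	strbuf_addstr(&key, "-");
	assert(!strcmp(key.buf, "/a/b/-"));

	/* a file entry "a/b/c" is probed via its parent directory "/a/b" */
	strbuf_reset(&key);
	strbuf_addstr(&key, "/a/b/c");
	strbuf_setlen(&key, strrchr(key.buf, '/') - key.buf);
	assert(!strcmp(key.buf, "/a/b"));

	strbuf_release(&key);
}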
|
|
|
|
|
2021-09-08 01:42:30 +00:00
|
|
|
int init_sparse_checkout_patterns(struct index_state *istate)
|
|
|
|
{
|
|
|
|
if (!core_apply_sparse_checkout)
|
|
|
|
return 1;
|
|
|
|
if (istate->sparse_checkout_patterns)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
CALLOC_ARRAY(istate->sparse_checkout_patterns, 1);
|
|
|
|
|
|
|
|
if (get_sparse_checkout_patterns(istate->sparse_checkout_patterns) < 0) {
|
|
|
|
FREE_AND_NULL(istate->sparse_checkout_patterns);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
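A note on the return convention above: a non-zero return (1 when sparse checkout is disabled, -1 when the patterns cannot be loaded) tells callers to treat every path as in scope, which is exactly how path_in_sparse_checkout_1() below consumes it. A minimal, hypothetical caller:

/* Hypothetical caller: when the guard fires, include every path. */
static int sparse_patterns_usable(struct index_state *istate)
{
	if (init_sparse_checkout_patterns(istate))
		return 0; /* disabled (1) or load failure (-1) */
	return 1; /* istate->sparse_checkout_patterns is now ready */
}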
|
|
|
|
|
|
|
|
static int path_in_sparse_checkout_1(const char *path,
|
|
|
|
struct index_state *istate,
|
|
|
|
int require_cone_mode)
|
|
|
|
{
|
|
|
|
int dtype = DT_REG;
|
add, rm, mv: fix bug that prevents the update of non-sparse dirs
These three commands recently learned to avoid updating paths outside
the sparse checkout even if they are missing the SKIP_WORKTREE bit. This
is done using path_in_sparse_checkout(), which checks whether a given
path matches the current list of sparsity rules, similar to what
clear_ce_flags() does when we run "git sparse-checkout init" or "git
sparse-checkout reapply". However, clear_ce_flags() uses a recursive
approach, applying the match results from parent directories on paths
that get the UNDECIDED result, whereas path_in_sparse_checkout() only
attempts to match the full path and immediately considers UNDECIDED as
NOT_MATCHED. This makes the function miss matches with leading
directories. For example, if the user has the sparsity patterns "!/a"
and "b/", add, rm, and mv will fail to update the path "a/b/c" and end
up displaying a warning about it being outside the sparse checkout even
though it isn't. This problem only occurs in full pattern mode as the
pattern matching functions never return UNDECIDED for cone mode.
To fix this, replicate the recursive behavior of clear_ce_flags() in
path_in_sparse_checkout(), falling back to the parent directory match
when a path gets the UNDECIDED result. (If this turns out to be too
expensive in some cases, we may want to later add some form of caching
to accelerate multiple queries within the same directory. This is not
implemented in this patch, though.) Also add two tests for each affected
command (add, rm, and mv) to check that they behave correctly with the
recursive pattern matching. The first test would previously fail without
this patch while the second already succeeded. It is added mostly to
make sure that we are not breaking the existing pattern matching for
directories that are really sparse, and also as a protection against any
future regressions.
Two other existing tests had to be changed as well: one test in t3602
checks that "git rm -r <dir>" won't remove sparse entries, but it didn't
allow the non-sparse entries inside <dir> to be removed. The other one,
in t7002, tested that "git mv" would correctly display a warning message
for sparse paths, but it accidentally expected the message to include
two non-sparse paths as well.
Signed-off-by: Matheus Tavares <matheus.bernardino@usp.br>
Acked-by: Derrick Stolee <dstolee@microsoft.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-10-28 14:21:11 +00:00
|
|
|
enum pattern_match_result match = UNDECIDED;
|
|
|
|
const char *end, *slash;
|
2021-09-08 01:42:30 +00:00
|
|
|
|
|
|
|
/*
|
2022-03-01 20:24:24 +00:00
|
|
|
* We default to accepting a path if the path is empty, there are no
|
|
|
|
* patterns, or the patterns are of the wrong type.
|
2021-09-08 01:42:30 +00:00
|
|
|
*/
|
2022-03-01 20:24:24 +00:00
|
|
|
if (!*path ||
|
|
|
|
init_sparse_checkout_patterns(istate) ||
|
2021-09-08 01:42:30 +00:00
|
|
|
(require_cone_mode &&
|
|
|
|
!istate->sparse_checkout_patterns->use_cone_patterns))
|
|
|
|
return 1;
|
|
|
|
|
add, rm, mv: fix bug that prevents the update of non-sparse dirs
2021-10-28 14:21:11 +00:00
|
|
|
/*
|
|
|
|
* If UNDECIDED, use the match from the parent dir (recursively), or
|
|
|
|
* fall back to NOT_MATCHED at the topmost level. Note that cone mode
|
|
|
|
* never returns UNDECIDED, so we will execute only one iteration in
|
|
|
|
* this case.
|
|
|
|
*/
|
|
|
|
for (end = path + strlen(path);
|
|
|
|
end > path && match == UNDECIDED;
|
|
|
|
end = slash) {
|
|
|
|
|
|
|
|
for (slash = end - 1; slash > path && *slash != '/'; slash--)
|
|
|
|
; /* do nothing */
|
|
|
|
|
|
|
|
match = path_matches_pattern_list(path, end - path,
|
|
|
|
slash > path ? slash + 1 : path, &dtype,
|
|
|
|
istate->sparse_checkout_patterns, istate);
|
|
|
|
|
|
|
|
/* We are going to match the parent dir now */
|
|
|
|
dtype = DT_DIR;
|
|
|
|
}
|
|
|
|
return match > 0;
|
2021-09-08 01:42:30 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int path_in_sparse_checkout(const char *path,
|
|
|
|
struct index_state *istate)
|
|
|
|
{
|
|
|
|
return path_in_sparse_checkout_1(path, istate, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int path_in_cone_mode_sparse_checkout(const char *path,
|
|
|
|
struct index_state *istate)
|
|
|
|
{
|
|
|
|
return path_in_sparse_checkout_1(path, istate, 1);
|
|
|
|
}
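Tying this back to the add/rm/mv fix described above: in full pattern mode with the sparsity rules "!/a" and "b/", the loop in path_in_sparse_checkout_1() first gets UNDECIDED for "a/b/c" and then matches the parent directory "a/b" against "b/". The sketch below assumes an index whose sparse-checkout patterns contain exactly those two rules; it is an illustration, not an existing test.

/*
 * Illustration (full pattern mode) with the rules:
 *     !/a
 *     b/
 * "a/b/c" itself is UNDECIDED, but its parent directory "a/b" matches
 * "b/", so the path counts as inside the sparse checkout.
 */
static void sparse_recursion_example(struct index_state *istate)
{
	assert(path_in_sparse_checkout("a/b/c", istate));
}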
|
|
|
|
|
2019-09-03 18:04:57 +00:00
|
|
|
static struct path_pattern *last_matching_pattern_from_lists(
|
2019-09-03 18:04:55 +00:00
|
|
|
struct dir_struct *dir, struct index_state *istate,
|
|
|
|
const char *pathname, int pathlen,
|
|
|
|
const char *basename, int *dtype_p)
|
2013-04-15 19:11:02 +00:00
|
|
|
{
|
|
|
|
int i, j;
|
|
|
|
struct exclude_list_group *group;
|
2019-09-03 18:04:55 +00:00
|
|
|
struct path_pattern *pattern;
|
2013-04-15 19:11:02 +00:00
|
|
|
for (i = EXC_CMDL; i <= EXC_FILE; i++) {
|
2023-02-27 15:28:10 +00:00
|
|
|
group = &dir->internal.exclude_list_group[i];
|
2013-04-15 19:11:02 +00:00
|
|
|
for (j = group->nr - 1; j >= 0; j--) {
|
2019-09-03 18:04:57 +00:00
|
|
|
pattern = last_matching_pattern_from_list(
|
2013-04-15 19:11:02 +00:00
|
|
|
pathname, pathlen, basename, dtype_p,
|
2019-09-03 18:04:56 +00:00
|
|
|
&group->pl[j], istate);
|
2019-09-03 18:04:55 +00:00
|
|
|
if (pattern)
|
|
|
|
return pattern;
|
2013-04-15 19:11:02 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2013-04-15 19:11:37 +00:00
|
|
|
/*
|
|
|
|
* Loads the per-directory exclude list for the substring of base
|
|
|
|
* which has a char length of baselen.
|
|
|
|
*/
|
2017-05-05 19:53:29 +00:00
|
|
|
static void prep_exclude(struct dir_struct *dir,
|
|
|
|
struct index_state *istate,
|
|
|
|
const char *base, int baselen)
|
2013-04-15 19:11:37 +00:00
|
|
|
{
|
|
|
|
struct exclude_list_group *group;
|
2019-09-03 18:04:56 +00:00
|
|
|
struct pattern_list *pl;
|
2013-04-15 19:11:37 +00:00
|
|
|
struct exclude_stack *stk = NULL;
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 10:12:25 +00:00
|
|
|
struct untracked_cache_dir *untracked;
|
2013-04-15 19:11:37 +00:00
|
|
|
int current;
|
|
|
|
|
2023-02-27 15:28:10 +00:00
|
|
|
group = &dir->internal.exclude_list_group[EXC_DIRS];
|
2013-04-15 19:11:37 +00:00
|
|
|
|
2014-07-14 09:47:11 +00:00
|
|
|
/*
|
|
|
|
* Pop the exclude lists from the EXC_DIRS exclude_list_group
|
2013-04-15 19:11:37 +00:00
|
|
|
* which originate from directories not in the prefix of the
|
2014-07-14 09:47:11 +00:00
|
|
|
* path being checked.
|
|
|
|
*/
|
2023-02-27 15:28:10 +00:00
|
|
|
while ((stk = dir->internal.exclude_stack) != NULL) {
|
2013-04-15 19:11:37 +00:00
|
|
|
if (stk->baselen <= baselen &&
|
2023-02-27 15:28:10 +00:00
|
|
|
!strncmp(dir->internal.basebuf.buf, base, stk->baselen))
|
2013-04-15 19:11:37 +00:00
|
|
|
break;
|
2023-02-27 15:28:10 +00:00
|
|
|
pl = &group->pl[dir->internal.exclude_stack->exclude_ix];
|
|
|
|
dir->internal.exclude_stack = stk->prev;
|
|
|
|
dir->internal.pattern = NULL;
|
2019-09-03 18:04:56 +00:00
|
|
|
free((char *)pl->src); /* see strbuf_detach() below */
|
2019-09-03 18:04:57 +00:00
|
|
|
clear_pattern_list(pl);
|
2013-04-15 19:11:37 +00:00
|
|
|
free(stk);
|
|
|
|
group->nr--;
|
|
|
|
}
|
|
|
|
|
dir.c: unify is_excluded and is_path_excluded APIs
The is_excluded and is_path_excluded APIs are very similar, except for a
few noteworthy differences:
is_excluded doesn't handle ignored directories, results for paths within
ignored directories are incorrect. This is probably based on the premise
that recursive directory scans should stop at ignored directories, which
is no longer true (in certain cases, read_directory_recursive currently
calls is_excluded *and* is_path_excluded to get correct ignored state).
is_excluded caches parsed .gitignore files of the last directory in struct
dir_struct. If the directory changes, it finds a common parent directory
and is very careful to drop only as much state as necessary. On the other
hand, is_excluded will also read and parse .gitignore files in already
ignored directories, which are completely irrelevant.
is_path_excluded correctly handles ignored directories by checking if any
component in the path is excluded. As it uses is_excluded internally, this
unfortunately forces is_excluded to drop and re-read all .gitignore files,
as there is no common parent directory for the root dir.
is_path_excluded tracks state in a separate struct path_exclude_check,
which is essentially a wrapper of dir_struct with two more fields. However,
as is_path_excluded also modifies dir_struct, it is not possible to e.g.
use multiple path_exclude_check structures with the same dir_struct in
parallel. The additional structure just unnecessarily complicates the API.
Teach is_excluded / prep_exclude about ignored directories: whenever
entering a new directory, first check if the entire directory is excluded.
Remember the excluded state in dir_struct. Don't traverse into already
ignored directories (i.e. don't read irrelevant .gitignore files).
Directories could also be excluded by exclude patterns specified on the
command line or .git/info/exclude, so we cannot simply skip prep_exclude
entirely if there's no .gitignore file name (dir_struct.exclude_per_dir).
Move this check to just before actually reading the file.
is_path_excluded is now equivalent to is_excluded, so we can simply
redirect to it (the public API is cleaned up in the next patch).
The performance impact of the additional ignored check per directory is
hardly noticeable when reading directories recursively (e.g. 'git status').
However, performance of git commands using the is_path_excluded API (e.g.
'git ls-files --cached --ignored --exclude-standard') is greatly improved
as this no longer re-reads .gitignore files on each call.
Here's some performance data from the linux and WebKit repos (best of 10
runs on a Debian Linux on SSD, core.preloadIndex=true):
       | ls-files -ci   | status         | status --ignored
       | linux | WebKit | linux | WebKit | linux | WebKit
-------+-------+--------+-------+--------+-------+---------
before | 0.506 |  6.539 | 0.212 |  1.555 | 0.323 |  2.541
after  | 0.080 |  1.191 | 0.218 |  1.583 | 0.321 |  2.579
gain   | 6.325 |  5.490 | 0.972 |  0.982 | 1.006 |  0.985
Signed-off-by: Karsten Blees <blees@dcon.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-04-15 19:12:14 +00:00
|
|
|
/* Skip traversing into sub directories if the parent is excluded */
|
2023-02-27 15:28:10 +00:00
|
|
|
if (dir->internal.pattern)
|
dir.c: unify is_excluded and is_path_excluded APIs
2013-04-15 19:12:14 +00:00
|
|
|
return;
|
|
|
|
|
2014-07-14 09:50:22 +00:00
|
|
|
/*
|
|
|
|
* Lazy initialization. All call sites currently just
|
|
|
|
* memset(dir, 0, sizeof(*dir)) before use. Changing all of
|
|
|
|
* them seems like a lot of work for little benefit.
|
|
|
|
*/
|
2023-02-27 15:28:10 +00:00
|
|
|
if (!dir->internal.basebuf.buf)
|
|
|
|
strbuf_init(&dir->internal.basebuf, PATH_MAX);
|
2014-07-14 09:50:22 +00:00
|
|
|
|
2013-04-15 19:11:37 +00:00
|
|
|
/* Read from the parent directories and push them down. */
|
|
|
|
current = stk ? stk->baselen : -1;
|
2023-02-27 15:28:10 +00:00
|
|
|
strbuf_setlen(&dir->internal.basebuf, current < 0 ? 0 : current);
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 10:12:25 +00:00
|
|
|
if (dir->untracked)
|
|
|
|
untracked = stk ? stk->ucd : dir->untracked->root;
|
|
|
|
else
|
|
|
|
untracked = NULL;
|
|
|
|
|
2013-04-15 19:11:37 +00:00
|
|
|
while (current < baselen) {
|
|
|
|
const char *cp;
|
2018-01-28 00:13:12 +00:00
|
|
|
struct oid_stat oid_stat;
|
2013-04-15 19:11:37 +00:00
|
|
|
|
2021-03-13 16:17:22 +00:00
|
|
|
CALLOC_ARRAY(stk, 1);
|
2013-04-15 19:11:37 +00:00
|
|
|
if (current < 0) {
|
|
|
|
cp = base;
|
|
|
|
current = 0;
|
2014-07-14 09:47:11 +00:00
|
|
|
} else {
|
2013-04-15 19:11:37 +00:00
|
|
|
cp = strchr(base + current + 1, '/');
|
|
|
|
if (!cp)
|
|
|
|
die("oops in prep_exclude");
|
|
|
|
cp++;
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 10:12:25 +00:00
|
|
|
untracked =
|
2023-02-27 15:28:10 +00:00
|
|
|
lookup_untracked(dir->untracked,
|
|
|
|
untracked,
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 10:12:25 +00:00
|
|
|
base + current,
|
|
|
|
cp - base - current);
|
2013-04-15 19:11:37 +00:00
|
|
|
}
|
2023-02-27 15:28:10 +00:00
|
|
|
stk->prev = dir->internal.exclude_stack;
|
2013-04-15 19:11:37 +00:00
|
|
|
stk->baselen = cp - base;
|
dir.c: unify is_excluded and is_path_excluded APIs
2013-04-15 19:12:14 +00:00
|
|
|
stk->exclude_ix = group->nr;
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 10:12:25 +00:00
|
|
|
stk->ucd = untracked;
|
2019-09-03 18:04:57 +00:00
|
|
|
pl = add_pattern_list(dir, EXC_DIRS, NULL);
|
2023-02-27 15:28:10 +00:00
|
|
|
strbuf_add(&dir->internal.basebuf, base + current, stk->baselen - current);
|
|
|
|
assert(stk->baselen == dir->internal.basebuf.len);
|
dir.c: unify is_excluded and is_path_excluded APIs
2013-04-15 19:12:14 +00:00
|
|
|
|
|
|
|
/* Abort if the directory is excluded */
|
|
|
|
if (stk->baselen) {
|
|
|
|
int dt = DT_DIR;
|
2023-02-27 15:28:10 +00:00
|
|
|
dir->internal.basebuf.buf[stk->baselen - 1] = 0;
|
|
|
|
dir->internal.pattern = last_matching_pattern_from_lists(dir,
|
2017-05-05 19:53:29 +00:00
|
|
|
istate,
|
2023-02-27 15:28:10 +00:00
|
|
|
dir->internal.basebuf.buf, stk->baselen - 1,
|
|
|
|
dir->internal.basebuf.buf + current, &dt);
|
|
|
|
dir->internal.basebuf.buf[stk->baselen - 1] = '/';
|
|
|
|
if (dir->internal.pattern &&
|
|
|
|
dir->internal.pattern->flags & PATTERN_FLAG_NEGATIVE)
|
|
|
|
dir->internal.pattern = NULL;
|
|
|
|
if (dir->internal.pattern) {
|
|
|
|
dir->internal.exclude_stack = stk;
|
dir.c: unify is_excluded and is_path_excluded APIs
2013-04-15 19:12:14 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-14 09:50:22 +00:00
|
|
|
/* Try to read per-directory file */
|
2018-01-28 00:13:12 +00:00
|
|
|
oidclr(&oid_stat.oid);
|
|
|
|
oid_stat.valid = 0;
|
2015-03-08 10:12:31 +00:00
|
|
|
if (dir->exclude_per_dir &&
|
|
|
|
/*
|
|
|
|
* If we know that no files have been added in
|
|
|
|
* this directory (i.e. valid_cached_dir() has
|
|
|
|
* been executed and set untracked->valid) ..
|
|
|
|
*/
|
|
|
|
(!untracked || !untracked->valid ||
|
|
|
|
/*
|
|
|
|
* .. and .gitignore does not exist before
|
2018-05-02 00:25:48 +00:00
|
|
|
* (i.e. null exclude_oid). Then we can skip
|
2015-07-31 17:35:01 +00:00
|
|
|
* loading .gitignore, which would result in
|
|
|
|
* ENOENT anyway.
|
2015-03-08 10:12:31 +00:00
|
|
|
*/
|
2018-05-02 00:25:48 +00:00
|
|
|
!is_null_oid(&untracked->exclude_oid))) {
|
dir.c: unify is_excluded and is_path_excluded APIs
2013-04-15 19:12:14 +00:00
|
|
|
/*
|
2023-02-27 15:28:10 +00:00
|
|
|
* dir->internal.basebuf gets reused by the traversal,
|
|
|
|
* but we need fname to remain unchanged to ensure the
|
|
|
|
* src member of each struct path_pattern correctly
|
2013-04-15 19:12:14 +00:00
|
|
|
* back-references its source file. Other invocations
|
2019-09-03 18:04:57 +00:00
|
|
|
* of add_pattern_list provide stable strings, so we
|
2014-07-14 09:50:22 +00:00
|
|
|
* strbuf_detach() and free() here in the caller.
|
2013-04-15 19:12:14 +00:00
|
|
|
*/
|
2014-07-14 09:50:22 +00:00
|
|
|
struct strbuf sb = STRBUF_INIT;
|
2023-02-27 15:28:10 +00:00
|
|
|
strbuf_addbuf(&sb, &dir->internal.basebuf);
|
2014-07-14 09:50:22 +00:00
|
|
|
strbuf_addstr(&sb, dir->exclude_per_dir);
|
2019-09-03 18:04:56 +00:00
|
|
|
pl->src = strbuf_detach(&sb, NULL);
|
2019-09-03 18:04:57 +00:00
|
|
|
add_patterns(pl->src, pl->src, stk->baselen, pl, istate,
|
2021-02-16 14:44:34 +00:00
|
|
|
PATTERN_NOFOLLOW,
|
2018-01-28 00:13:12 +00:00
|
|
|
untracked ? &oid_stat : NULL);
|
untracked cache: record .gitignore information and dir hierarchy
The idea is if we can capture all input and (non-recursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.
The requirement for this to work is stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of input of r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on stat info of the directory in question, all .gitignore leading to
it and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.
Two hash_sha1_file() are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified,
otherwise their SHA-1 in index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them if their content is different from index, but I think
.gitignore files are rarely modified, so not worth extra cache data
(and the hashing penalty in read-cache.c:verify_hdr(), as we will be storing
this as an index extension).
The implication is, if you change .gitignore, you better add it to the
index soon or you lose all the benefit of untracked cache because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for .gitignore at root.
This cached output is about untracked files only, not ignored files
because the number of tracked files is usually small, so small cache
overhead, while the number of ignored files could go really high
(e.g. *.o files mixing with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:25 +00:00
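	/*
	 * (Summary of the untracked-cache rule described in the message
	 * above: the cached, non-recursive output for a directory may be
	 * reused only while all of its recorded inputs are unchanged --
	 * the directory's own stat data, every .gitignore leading to it
	 * (compared via object id, e.g. untracked->exclude_oid below),
	 * and the check_only flag.)
	 */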
|
|
|
}
|
2015-03-08 10:12:27 +00:00
|
|
|
/*
|
|
|
|
* NEEDSWORK: when untracked cache is enabled, prep_exclude()
|
|
|
|
* will first be called in valid_cached_dir() then maybe many
|
2019-09-03 18:04:57 +00:00
|
|
|
* times more in last_matching_pattern(). When the cache is
|
|
|
|
* used, last_matching_pattern() will not be called and
|
2015-03-08 10:12:27 +00:00
|
|
|
* reading .gitignore content will be a waste.
|
|
|
|
*
|
|
|
|
* So when it's called by valid_cached_dir() and we can get
|
|
|
|
* .gitignore SHA-1 from the index (i.e. .gitignore is not
|
|
|
|
* modified on work tree), we could delay reading the
|
|
|
|
* .gitignore content until we absolutely need it in
|
2019-09-03 18:04:57 +00:00
|
|
|
* last_matching_pattern(). Be careful about ignore rule
|
2015-03-08 10:12:27 +00:00
|
|
|
* order, though, if you do that.
|
|
|
|
*/
|
|
|
|
if (untracked &&
|
2018-08-28 21:22:48 +00:00
|
|
|
!oideq(&oid_stat.oid, &untracked->exclude_oid)) {
|
2015-03-08 10:12:27 +00:00
|
|
|
invalidate_gitignore(dir->untracked, untracked);
|
2018-05-02 00:25:48 +00:00
|
|
|
oidcpy(&untracked->exclude_oid, &oid_stat.oid);
|
2013-04-15 19:12:14 +00:00
|
|
|
}
|
2023-02-27 15:28:10 +00:00
|
|
|
dir->internal.exclude_stack = stk;
|
2013-04-15 19:11:37 +00:00
|
|
|
current = stk->baselen;
|
|
|
|
}
|
2023-02-27 15:28:10 +00:00
|
|
|
strbuf_setlen(&dir->internal.basebuf, baselen);
|
2013-04-15 19:11:37 +00:00
|
|
|
}
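A minimal sketch (not part of dir.c) of the "redirect" described in the
"unify is_excluded and is_path_excluded APIs" message above; the function
name is hypothetical and the signature follows the ones used in this file:

static int is_path_excluded_sketch(struct dir_struct *dir,
				   struct index_state *istate,
				   const char *pathname, int *dtype_p)
{
	/*
	 * prep_exclude() (reached from is_excluded() via
	 * last_matching_pattern()) now notices ignored parent directories
	 * itself, so a per-path check reduces to a plain is_excluded()
	 * call on the full path.
	 */
	return is_excluded(dir, istate, pathname, dtype_p);
}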
|
|
|
|
|
2012-12-27 02:32:27 +00:00
|
|
|
/*
|
|
|
|
* Loads the exclude lists for the directory containing pathname, then
|
|
|
|
* scans all exclude lists to determine whether pathname is excluded.
|
|
|
|
* Returns the exclude_list element which matched, or NULL for
|
|
|
|
* undecided.
|
|
|
|
*/
|
2019-09-03 18:04:57 +00:00
|
|
|
struct path_pattern *last_matching_pattern(struct dir_struct *dir,
|
2017-05-05 19:53:30 +00:00
|
|
|
struct index_state *istate,
|
|
|
|
const char *pathname,
|
|
|
|
int *dtype_p)
|
2006-05-17 02:02:14 +00:00
|
|
|
{
|
|
|
|
int pathlen = strlen(pathname);
|
2007-10-28 20:27:13 +00:00
|
|
|
const char *basename = strrchr(pathname, '/');
|
|
|
|
basename = (basename) ? basename+1 : pathname;
|
2006-05-17 02:02:14 +00:00
|
|
|
|
2017-05-05 19:53:30 +00:00
|
|
|
prep_exclude(dir, istate, pathname, basename-pathname);
|
2013-01-06 16:58:03 +00:00
|
|
|
|
2023-02-27 15:28:10 +00:00
|
|
|
if (dir->internal.pattern)
|
|
|
|
return dir->internal.pattern;
|
2013-04-15 19:12:14 +00:00
|
|
|
|
2019-09-03 18:04:57 +00:00
|
|
|
return last_matching_pattern_from_lists(dir, istate, pathname, pathlen,
|
2013-04-15 19:11:02 +00:00
|
|
|
basename, dtype_p);
|
2012-12-27 02:32:27 +00:00
|
|
|
}
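/*
 * Note on the dir->internal.pattern early return above: prep_exclude()
 * records there the pattern that excluded the containing directory, if
 * any (see the "unify is_excluded and is_path_excluded APIs" message).
 * When it is set, every path inside that directory is excluded by that
 * same pattern, so the per-file scan of the pattern lists is skipped.
 */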
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Loads the exclude lists for the directory containing pathname, then
|
|
|
|
* scans all exclude lists to determine whether pathname is excluded.
|
|
|
|
* Returns 1 if true, otherwise 0.
|
|
|
|
*/
|
2017-05-05 19:53:30 +00:00
|
|
|
int is_excluded(struct dir_struct *dir, struct index_state *istate,
|
|
|
|
const char *pathname, int *dtype_p)
|
2012-12-27 02:32:27 +00:00
|
|
|
{
|
2019-09-03 18:04:55 +00:00
|
|
|
struct path_pattern *pattern =
|
2019-09-03 18:04:57 +00:00
|
|
|
last_matching_pattern(dir, istate, pathname, dtype_p);
|
2019-09-03 18:04:55 +00:00
|
|
|
if (pattern)
|
2019-09-03 18:04:56 +00:00
|
|
|
return pattern->flags & PATTERN_FLAG_NEGATIVE ? 0 : 1;
|
2006-05-17 02:02:14 +00:00
|
|
|
return 0;
|
|
|
|
}
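A minimal usage sketch for is_excluded() (hypothetical caller; it assumes a
dir_struct whose pattern lists have already been set up, e.g. with
setup_standard_excludes()):

static int path_is_ignored_sketch(struct dir_struct *dir,
				  struct index_state *istate,
				  const char *path)
{
	int dtype = DT_UNKNOWN;	/* let is_excluded() resolve the type lazily */

	return is_excluded(dir, istate, path, &dtype);
}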
|
|
|
|
|
2007-11-08 23:35:32 +00:00
|
|
|
static struct dir_entry *dir_entry_new(const char *pathname, int len)
|
|
|
|
{
|
2006-05-17 02:02:14 +00:00
|
|
|
struct dir_entry *ent;
|
|
|
|
|
2016-02-22 22:44:32 +00:00
|
|
|
FLEX_ALLOC_MEM(ent, name, pathname, len);
|
2006-05-17 02:02:14 +00:00
|
|
|
ent->len = len;
|
2006-12-29 19:01:31 +00:00
|
|
|
return ent;
|
2006-05-17 02:02:14 +00:00
|
|
|
}
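/*
 * Note: FLEX_ALLOC_MEM() above allocates the struct together with its
 * flexible "name" array in one allocation and copies the first "len"
 * bytes of "pathname" into it, NUL-terminated, which is why
 * dir_entry_new() needs no separate copy of the string.
 */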
|
|
|
|
|
2017-05-05 19:53:25 +00:00
|
|
|
static struct dir_entry *dir_add_name(struct dir_struct *dir,
|
|
|
|
struct index_state *istate,
|
|
|
|
const char *pathname, int len)
|
2007-06-11 13:39:44 +00:00
|
|
|
{
|
2017-05-05 19:53:25 +00:00
|
|
|
if (index_file_exists(istate, pathname, len, ignore_case))
|
2007-06-11 13:39:44 +00:00
|
|
|
return NULL;
|
|
|
|
|
2023-02-27 15:28:10 +00:00
|
|
|
ALLOC_GROW(dir->entries, dir->nr+1, dir->internal.alloc);
|
2007-06-11 13:39:44 +00:00
|
|
|
return dir->entries[dir->nr++] = dir_entry_new(pathname, len);
|
|
|
|
}
|
|
|
|
|
2017-05-05 19:53:25 +00:00
|
|
|
struct dir_entry *dir_add_ignored(struct dir_struct *dir,
|
|
|
|
struct index_state *istate,
|
|
|
|
const char *pathname, int len)
|
2007-06-11 13:39:50 +00:00
|
|
|
{
|
2017-05-05 19:53:25 +00:00
|
|
|
if (!index_name_is_other(istate, pathname, len))
|
2007-06-11 13:39:50 +00:00
|
|
|
return NULL;
|
|
|
|
|
2023-02-27 15:28:10 +00:00
|
|
|
ALLOC_GROW(dir->ignored, dir->ignored_nr+1, dir->internal.ignored_alloc);
|
2007-06-11 13:39:50 +00:00
|
|
|
return dir->ignored[dir->ignored_nr++] = dir_entry_new(pathname, len);
|
|
|
|
}
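/*
 * The two helpers above mirror each other: dir_add_name() records a path
 * in dir->entries only when no index entry of that exact name exists,
 * while dir_add_ignored() records a path in dir->ignored only when the
 * index considers it "other" (i.e. not tracked).  Both grow their arrays
 * with ALLOC_GROW and store a freshly allocated dir_entry.
 */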
|
|
|
|
|
2007-04-11 21:49:44 +00:00
|
|
|
enum exist_status {
|
|
|
|
index_nonexistent = 0,
|
|
|
|
index_directory,
|
2010-05-14 09:31:35 +00:00
|
|
|
index_gitdir
|
2007-04-11 21:49:44 +00:00
|
|
|
};
|
|
|
|
|
2010-10-03 09:56:43 +00:00
|
|
|
/*
|
2013-08-15 19:08:45 +00:00
|
|
|
* Do not use the alphabetically sorted index to look up
|
2010-10-03 09:56:43 +00:00
|
|
|
* the directory name; instead, use the case insensitive
|
2013-09-17 07:06:15 +00:00
|
|
|
* directory hash.
|
2010-10-03 09:56:43 +00:00
|
|
|
*/
|
2017-05-05 19:53:23 +00:00
|
|
|
static enum exist_status directory_exists_in_index_icase(struct index_state *istate,
|
|
|
|
const char *dirname, int len)
|
2010-10-03 09:56:43 +00:00
|
|
|
{
|
2015-10-21 17:54:11 +00:00
|
|
|
struct cache_entry *ce;
|
2010-10-03 09:56:43 +00:00
|
|
|
|
2017-05-05 19:53:23 +00:00
|
|
|
if (index_dir_exists(istate, dirname, len))
|
2010-10-03 09:56:43 +00:00
|
|
|
return index_directory;
|
|
|
|
|
2017-05-05 19:53:23 +00:00
|
|
|
ce = index_file_exists(istate, dirname, len, ignore_case);
|
2015-10-21 17:54:11 +00:00
|
|
|
if (ce && S_ISGITLINK(ce->ce_mode))
|
2010-10-03 09:56:43 +00:00
|
|
|
return index_gitdir;
|
|
|
|
|
|
|
|
return index_nonexistent;
|
|
|
|
}
|
|
|
|
|
2007-04-11 21:49:44 +00:00
|
|
|
/*
|
|
|
|
* The index sorts alphabetically by entry name, which
|
|
|
|
* means that a gitlink sorts as '\0' at the end, while
|
|
|
|
* a directory (which is defined not as an entry, but as
|
|
|
|
* the files it contains) will sort with the '/' at the
|
|
|
|
* end.
|
|
|
|
*/
|
2017-05-05 19:53:23 +00:00
|
|
|
static enum exist_status directory_exists_in_index(struct index_state *istate,
|
|
|
|
const char *dirname, int len)
|
2006-05-17 02:02:14 +00:00
|
|
|
{
|
2010-10-03 09:56:43 +00:00
|
|
|
int pos;
|
|
|
|
|
|
|
|
if (ignore_case)
|
2017-05-05 19:53:23 +00:00
|
|
|
return directory_exists_in_index_icase(istate, dirname, len);
|
2010-10-03 09:56:43 +00:00
|
|
|
|
2017-05-05 19:53:23 +00:00
|
|
|
pos = index_name_pos(istate, dirname, len);
|
2007-04-11 21:49:44 +00:00
|
|
|
if (pos < 0)
|
|
|
|
pos = -pos-1;
|
2017-05-05 19:53:23 +00:00
|
|
|
while (pos < istate->cache_nr) {
|
|
|
|
const struct cache_entry *ce = istate->cache[pos++];
|
2007-04-11 21:49:44 +00:00
|
|
|
unsigned char endchar;
|
|
|
|
|
|
|
|
if (strncmp(ce->name, dirname, len))
|
|
|
|
break;
|
|
|
|
endchar = ce->name[len];
|
|
|
|
if (endchar > '/')
|
|
|
|
break;
|
|
|
|
if (endchar == '/')
|
|
|
|
return index_directory;
|
2008-01-15 00:03:17 +00:00
|
|
|
if (!endchar && S_ISGITLINK(ce->ce_mode))
|
2007-04-11 21:49:44 +00:00
|
|
|
return index_gitdir;
|
|
|
|
}
|
|
|
|
return index_nonexistent;
|
|
|
|
}
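/*
 * Worked example of the ordering that the comment above and the loop in
 * directory_exists_in_index() rely on (hypothetical index contents,
 * shown in sorted order):
 *
 *     "sub"         gitlink entry: "sub" followed by '\0' sorts first
 *     "sub/file.c"  regular entry: "sub" followed by '/' sorts after it
 *
 * Scanning forward from index_name_pos(istate, "sub", 3), an entry whose
 * byte at position len is '/' proves a tracked file lives inside the
 * directory (index_directory), while endchar == '\0' combined with
 * S_ISGITLINK() identifies a gitlink (index_gitdir).
 */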
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When we find a directory when traversing the filesystem, we
|
|
|
|
* have three distinct cases:
|
|
|
|
*
|
|
|
|
* - ignore it
|
|
|
|
* - see it as a directory
|
|
|
|
* - recurse into it
|
|
|
|
*
|
|
|
|
* and which one we choose depends on a combination of existing
|
|
|
|
* git index contents and the flags passed into the directory
|
|
|
|
* traversal routine.
|
|
|
|
*
|
|
|
|
* Case 1: If we *already* have entries in the index under that
|
2013-04-15 19:10:05 +00:00
|
|
|
* directory name, we always recurse into the directory to see
|
|
|
|
* all the files.
|
2007-04-11 21:49:44 +00:00
|
|
|
*
|
|
|
|
* Case 2: If we *already* have that directory name as a gitlink,
|
|
|
|
* we always continue to see it as a gitlink, regardless of whether
|
|
|
|
* there is an actual git directory there or not (it might not
|
|
|
|
* be checked out as a subproject!)
|
|
|
|
*
|
|
|
|
* Case 3: if we didn't have it in the index previously, we
|
|
|
|
* have a few sub-cases:
|
|
|
|
*
|
2021-05-12 17:28:21 +00:00
|
|
|
* (a) if DIR_SHOW_OTHER_DIRECTORIES flag is set, we show it as
|
|
|
|
* just a directory, unless DIR_HIDE_EMPTY_DIRECTORIES is
|
2013-04-15 19:14:22 +00:00
|
|
|
* also true, in which case we need to check if it contains any
|
|
|
|
* untracked and / or ignored files.
|
2021-05-12 17:28:21 +00:00
|
|
|
* (b) if it looks like a git directory and we don't have the
|
|
|
|
* DIR_NO_GITLINKS flag, then we treat it as a gitlink, and
|
|
|
|
* show it as a directory.
|
2007-04-11 21:49:44 +00:00
|
|
|
* (c) otherwise, we recurse into it.
|
|
|
|
*/
|
2013-04-15 19:14:22 +00:00
|
|
|
static enum path_treatment treat_directory(struct dir_struct *dir,
|
2017-05-05 19:53:32 +00:00
|
|
|
struct index_state *istate,
|
2015-03-08 10:12:25 +00:00
|
|
|
struct untracked_cache_dir *untracked,
|
2020-04-01 04:17:40 +00:00
|
|
|
const char *dirname, int len, int baselen, int excluded,
|
2017-01-04 18:03:57 +00:00
|
|
|
const struct pathspec *pathspec)
|
2007-04-11 21:49:44 +00:00
|
|
|
{
|
dir: replace exponential algorithm with a linear one
dir's read_directory_recursive() naturally operates recursively in order
to walk the directory tree. Treating of directories is sometimes weird
because there are so many different permutations about how to handle
directories. Some examples:
* 'git ls-files -o --directory' only needs to know that a directory
itself is untracked; it doesn't need to recurse into it to see what
is underneath.
* 'git status' needs to recurse into an untracked directory, but only
to determine whether or not it is empty. If there are no files
underneath, the directory itself will be omitted from the output.
If it is not empty, only the directory will be listed.
* 'git status --ignored' needs to recurse into untracked directories
and report all the ignored entries and then report the directory as
untracked -- UNLESS all the entries under the directory are
ignored, in which case we don't print any of the entries under the
directory and just report the directory itself as ignored. (Note
that although this forces us to walk all untracked files underneath
the directory as well, we strip them from the output, except for
users like 'git clean' who also set DIR_KEEP_TRACKED_CONTENTS.)
* For 'git clean', we may need to recurse into a directory that
doesn't match any specified pathspecs, if it's possible that there
is an entry underneath the directory that can match one of the
pathspecs. In such a case, we need to be careful to omit the
directory itself from the list of paths (see commit 404ebceda01c
("dir: also check directories for matching pathspecs", 2019-09-17))
Part of the tension noted above is that the treatment of a directory can
change based on the files within it, and based on the various settings
in dir->flags. Trying to keep this in mind while reading over the code,
it is easy to think in terms of "treat_directory() tells us what to do
with a directory, and read_directory_recursive() is the thing that
recurses". Since we need to look into a directory to know how to treat
it, though, it is quite easy to decide to (also) recurse into the
directory from treat_directory() by adding a read_directory_recursive()
call. Adding such a call is actually fine, IF we make sure that
read_directory_recursive() does not also recurse into that same
directory.
Unfortunately, commit df5bcdf83aeb ("dir: recurse into untracked dirs
for ignored files", 2017-05-18), added exactly such a case to the code,
meaning we'd have two calls to read_directory_recursive() for an
untracked directory. So, if we had a file named
one/two/three/four/five/somefile.txt
and nothing in one/ was tracked, then 'git status --ignored' would
call read_directory_recursive() twice on the directory 'one/', and
each of those would call read_directory_recursive() twice on the
directory 'one/two/', and so on until read_directory_recursive() was
called 2^5 times for 'one/two/three/four/five/'.
Avoid calling read_directory_recursive() twice per level by moving a
lot of the special logic into treat_directory().
Since dir.c is somewhat complex, extra cruft built up around this over
time. While trying to unravel it, I noticed several instances where the
first call to read_directory_recursive() would return e.g.
path_untracked for some directory and a later one would return e.g.
path_none, despite the fact that the directory clearly should have been
considered untracked. The code happened to work due to the side-effect
from the first invocation of adding untracked entries to dir->entries;
this allowed it to get the correct output despite the supposed override
in return value by the later call.
I am somewhat concerned that there are still bugs and maybe even
testcases with the wrong expectation. I have tried to carefully
document treat_directory() since it becomes more complex after this
change (though much of this complexity came from elsewhere that probably
deserved better comments to begin with). However, much of my work felt
more like a game of whackamole while attempting to make the code match
the existing regression tests than an attempt to create an
implementation that matched some clear design. That seems wrong to me,
but the rules of existing behavior had so many special cases that I had
a hard time coming up with some overarching rules about what correct
behavior is for all cases, forcing me to hope that the regression tests
are correct and sufficient. Such a hope seems likely to be ill-founded,
given my experience with dir.c-related testcases in the last few months:
Examples where the documentation was hard to parse or even just wrong:
* 3aca58045f4f (git-clean.txt: do not claim we will delete files with
-n/--dry-run, 2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
Examples where testcases were declared wrong and changed:
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* a2b13367fe55 (Revert "dir.c: make 'git-status --ignored' work within
leading directories", 2019-12-10)
Examples where testcases were clearly inadequate:
* 502c386ff944 (t7300-clean: demonstrate deleting nested repo with an
ignored file breakage, 2019-08-25)
* 7541cc530239 (t7300: add testcases showing failure to clean specified
pathspecs, 2019-09-17)
* a5e916c7453b (dir: fix off-by-one error in match_pathspec_item,
2019-09-17)
* 404ebceda01c (dir: also check directories for matching pathspecs,
2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* 452efd11fbf6 (t3011: demonstrate directory traversal failures,
2019-12-10)
* b9670c1f5e6b (dir: fix checks on common prefix directory, 2019-12-19)
Examples where "correct behavior" was unclear to everyone:
https://lore.kernel.org/git/20190905154735.29784-1-newren@gmail.com/
Other commits of note:
* 902b90cf42bc (clean: fix theoretical path corruption, 2019-09-17)
However, on the positive side, it does make the code much faster. For
the following simple shell loop in an empty repository:
for depth in $(seq 10 25)
do
dirs=$(for i in $(seq 1 $depth) ; do printf 'dir/' ; done)
rm -rf dir
mkdir -p $dirs
>$dirs/untracked-file
/usr/bin/time --format="$depth: %e" git status --ignored >/dev/null
done
I saw the following timings, in seconds (note that the numbers are a
little noisy from run-to-run, but the trend is very clear with every
run):
10: 0.03
11: 0.05
12: 0.08
13: 0.19
14: 0.29
15: 0.50
16: 1.05
17: 2.11
18: 4.11
19: 8.60
20: 17.55
21: 33.87
22: 68.71
23: 140.05
24: 274.45
25: 551.15
For the above run, using strace I can look for the number of untracked
directories opened and can verify that it matches the expected
2^($depth+1)-2 (the sum of 2^1 + 2^2 + 2^3 + ... + 2^$depth).
After this fix, with strace I can verify that the number of untracked
directories that are opened drops to just $depth, and the timings all
drop to 0.00. In fact, it isn't until a depth of 190 nested directories
that it sometimes starts reporting a time of 0.01 seconds and doesn't
consistently report 0.01 seconds until there are 240 nested directories.
The previous code would have taken
17.55 * 2^220 / (60*60*24*365) = 9.4 * 10^59 YEARS
to have completed the 240 nested directories case. It's not often
that you get to speed something up by a factor of 3*10^69.
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-01 04:17:42 +00:00
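	/*
	 * Illustrative note on the pre-fix behaviour described above (not
	 * git code): with every untracked directory visited twice by its
	 * parent, the number of directory opens follows
	 *
	 *     opens(depth) = 2 * (1 + opens(depth - 1)),   opens(0) = 0
	 *
	 * i.e. 2^(depth+1) - 2, matching the strace counts quoted in the
	 * message.
	 */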
|
|
|
/*
|
|
|
|
* WARNING: From this function, you can return path_recurse or you
|
|
|
|
* can call read_directory_recursive() (or neither), but
|
|
|
|
* you CAN'T DO BOTH.
|
|
|
|
*/
|
|
|
|
enum path_treatment state;
|
2020-04-01 04:17:44 +00:00
|
|
|
int matches_how = 0;
|
2022-06-16 23:44:33 +00:00
|
|
|
int check_only, stop_early;
|
2020-04-01 04:17:43 +00:00
|
|
|
int old_ignored_nr, old_untracked_nr;
|
2007-04-11 21:49:44 +00:00
|
|
|
/* The "len-1" is to strip the final '/' */
|
2020-04-01 04:17:41 +00:00
|
|
|
enum exist_status status = directory_exists_in_index(istate, dirname, len-1);
|
2007-04-11 21:49:44 +00:00
|
|
|
|
2020-04-01 04:17:41 +00:00
|
|
|
if (status == index_directory)
|
|
|
|
return path_recurse;
|
|
|
|
if (status == index_gitdir)
|
2013-07-01 21:00:32 +00:00
|
|
|
return path_none;
|
2020-04-01 04:17:41 +00:00
|
|
|
if (status != index_nonexistent)
|
|
|
|
BUG("Unhandled value for directory_exists_in_index: %d\n", status);
|
2007-04-11 21:49:44 +00:00
|
|
|
|
2020-04-01 04:17:44 +00:00
|
|
|
/*
|
|
|
|
* We don't want to descend into paths that don't match the necessary
|
|
|
|
* patterns. Clearly, if we don't have a pathspec, then we can't check
|
|
|
|
* for matching patterns. Also, if (excluded) then we know we matched
|
|
|
|
* the exclusion patterns so as an optimization we can skip checking
|
|
|
|
* for matching patterns.
|
|
|
|
*/
|
|
|
|
if (pathspec && !excluded) {
|
dir: fix treatment of negated pathspecs
do_match_pathspec() started life as match_pathspec_depth_1() and for
correctness was only supposed to be called from match_pathspec_depth().
match_pathspec_depth() was later renamed to match_pathspec(), so the
invariant we expect today is that do_match_pathspec() has no direct
callers outside of match_pathspec().
Unfortunately, this intention was lost with the renames of the two
functions, and additional calls to do_match_pathspec() were added in
commits 75a6315f74 ("ls-files: add pathspec matching for submodules",
2016-10-07) and 89a1f4aaf7 ("dir: if our pathspec might match files
under a dir, recurse into it", 2019-09-17). Of course,
do_match_pathspec() had an important advantage over match_pathspec() --
match_pathspec() would hardcode flags to one of two values, and these
new callers needed to pass some other value for flags. Also, although
calling do_match_pathspec() directly was incorrect, there likely wasn't
any difference in the observable end output, because the bug just meant
that fill_directory() would recurse into unneeded directories. Since
subsequent does-this-path-match checks on individual paths under the
directory would cause those extra paths to be filtered out, the only
difference from using the wrong function was unnecessary computation.
The second of those bad calls to do_match_pathspec() was involved -- via
either direct movement or via copying+editing -- into a number of later
refactors. See commits 777b420347 ("dir: synchronize
treat_leading_path() and read_directory_recursive()", 2019-12-19),
8d92fb2927 ("dir: replace exponential algorithm with a linear one",
2020-04-01), and 95c11ecc73 ("Fix error-prone fill_directory() API; make
it only return matches", 2020-04-01). The last of those introduced the
usage of do_match_pathspec() on an individual file, and thus resulted in
individual paths being returned that shouldn't be.
The problem with calling do_match_pathspec() instead of match_pathspec()
is that any negated patterns such as `:!unwanted_path` will be ignored.
Add a new match_pathspec_with_flags() function to fulfill the needs of
specifying special flags while still correctly checking negated
patterns, add a big comment above do_match_pathspec() to prevent others
from misusing it, and correct current callers of do_match_pathspec() to
instead use either match_pathspec() or match_pathspec_with_flags().
One final note is that DO_MATCH_LEADING_PATHSPEC needs special
consideration when working with DO_MATCH_EXCLUDE. The point of
DO_MATCH_LEADING_PATHSPEC is that if we have a pathspec like
*/Makefile
and we are checking a directory path like
src/module/component
that we want to consider it a match so that we recurse into the
directory because it _might_ have a file named Makefile somewhere below.
However, when we are using an exclusion pattern, i.e. we have a pathspec
like
:(exclude)*/Makefile
we do NOT want to say that a directory path like
src/module/component
is a (negative) match. While there *might* be a file named 'Makefile'
somewhere below that directory, there could also be other files and we
cannot pre-emptively rule all the files under that directory out; we
need to recurse and then check individual files. Adjust the
DO_MATCH_LEADING_PATHSPEC logic to only get activated for positive
pathspecs.
Reported-by: John Millikin <jmillikin@stripe.com>
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-06-05 18:23:48 +00:00
|
|
|
matches_how = match_pathspec_with_flags(istate, pathspec,
|
|
|
|
dirname, len,
|
|
|
|
0 /* prefix */,
|
|
|
|
NULL /* seen */,
|
|
|
|
DO_MATCH_LEADING_PATHSPEC);
|
2020-04-01 04:17:44 +00:00
|
|
|
if (!matches_how)
|
|
|
|
return path_none;
|
|
|
|
}
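	/*
	 * Note: matches_how may be MATCHED_RECURSIVELY_LEADING_PATHSPEC
	 * here, meaning the directory itself did not match but a positive
	 * pathspec could still match something underneath it (the
	 * Makefile-under-a-wildcard example in the "dir: fix treatment of
	 * negated pathspecs" message above); the nested-repository
	 * handling below checks for exactly that value.
	 */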
|
clean: avoid removing untracked files in a nested git repository
Users expect files in a nested git repository to be left alone unless
sufficiently forced (with two -f's). Unfortunately, in certain
circumstances, git would delete both tracked (and possibly dirty) files
and untracked files within a nested repository. To explain how this
happens, let's contrast a couple cases. First, take the following
example setup (which assumes we are already within a git repo):
git init nested
cd nested
>tracked
git add tracked
git commit -m init
>untracked
cd ..
In this setup, everything works as expected; running 'git clean -fd'
will result in fill_directory() returning the following paths:
nested/
nested/tracked
nested/untracked
and then correct_untracked_entries() would notice this can be compressed
to
nested/
and then since "nested/" is a directory, we would call
remove_dirs("nested/", ...), which would
check is_nonbare_repository_dir() and then decide to skip it.
However, if someone also creates an ignored file:
>nested/ignored
then running 'git clean -fd' would result in fill_directory() returning
the same paths:
nested/
nested/tracked
nested/untracked
but correct_untracked_entries() will notice that we had ignored entries
under nested/ and thus simplify this list to
nested/tracked
nested/untracked
Since these are not directories, we do not call remove_dirs() which was
the only place that had the is_nonbare_repository_dir() safety check --
resulting in us deleting both the untracked file and the tracked (and
possibly dirty) file.
One possible fix for this issue would be walking the parent directories
of each path and checking if they represent nonbare repositories, but
that would be wasteful. Even if we added caching of some sort, it's
still a waste because we should have been able to check that "nested/"
represented a nonbare repository before even descending into it in the
first place. Add a DIR_SKIP_NESTED_GIT flag to dir_struct.flags and use
it to prevent fill_directory() and friends from descending into nested
git repos.
With this change, we also modify two regression tests added in commit
91479b9c72f1 ("t7300: add tests to document behavior of clean and nested
git", 2015-06-15). That commit, nor its series, nor the six previous
iterations of that series on the mailing list discussed why those tests
coded the expectation they did. In fact, it appears their purpose was
simply to test _existing_ behavior to make sure that the performance
changes didn't change the behavior. However, these two tests directly
contradicted the manpage's claims that two -f's were required to delete
files/directories under a nested git repository. While one could argue
that the user gave an explicit path which matched files/directories that
were within a nested repository, there's a slippery slope that becomes
very difficult for users to understand once you go down that route (e.g.
what if they specified "git clean -f -d '*.c'"?) It would also be hard
to explain what the exact behavior was; avoid such problems by making it
really simple.
Also, clean up some grammar errors describing this functionality in the
git-clean manpage.
Finally, there are still a couple bugs with -ffd not cleaning out enough
(e.g. missing the nested .git) and with -ffdX possibly cleaning out the
wrong files (paying attention to outer .gitignore instead of inner).
This patch does not address these cases at all (and does not change the
behavior relative to those flags), it only fixes the handling when given
a single -f. See
https://public-inbox.org/git/20190905212043.GC32087@szeder.dev/ for more
discussion of the -ffd[X?] bugs.
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-09-17 16:35:02 +00:00
|
|
|
|
2020-04-01 04:17:44 +00:00
|
|
|
|
2020-04-01 04:17:41 +00:00
|
|
|
if ((dir->flags & DIR_SKIP_NESTED_GIT) ||
|
|
|
|
!(dir->flags & DIR_NO_GITLINKS)) {
|
2022-06-16 23:19:55 +00:00
|
|
|
/*
|
|
|
|
* Determine if `dirname` is a nested repo by confirming that:
|
|
|
|
* 1) we are in a nonbare repository, and
|
|
|
|
* 2) `dirname` is not an immediate parent of `the_repository->gitdir`,
|
|
|
|
* which could occur if the git_dir or worktree location was
|
|
|
|
* manually configured by the user; see t2205 testcases 1-3 for
|
|
|
|
* examples where this matters
|
|
|
|
*/
|
2022-06-16 23:44:33 +00:00
|
|
|
int nested_repo;
|
2020-04-01 04:17:41 +00:00
|
|
|
struct strbuf sb = STRBUF_INIT;
|
|
|
|
strbuf_addstr(&sb, dirname);
|
|
|
|
nested_repo = is_nonbare_repository_dir(&sb);
|
2022-06-16 23:19:55 +00:00
|
|
|
|
|
|
|
if (nested_repo) {
|
|
|
|
char *real_dirname, *real_gitdir;
|
|
|
|
strbuf_addstr(&sb, ".git");
|
|
|
|
real_dirname = real_pathdup(sb.buf, 1);
|
|
|
|
real_gitdir = real_pathdup(the_repository->gitdir, 1);
|
|
|
|
|
|
|
|
nested_repo = !!strcmp(real_dirname, real_gitdir);
|
|
|
|
free(real_gitdir);
|
|
|
|
free(real_dirname);
|
|
|
|
}
|
2020-04-01 04:17:41 +00:00
|
|
|
strbuf_release(&sb);
|
2022-06-16 23:44:33 +00:00
|
|
|
|
|
|
|
if (nested_repo) {
|
|
|
|
if ((dir->flags & DIR_SKIP_NESTED_GIT) ||
|
|
|
|
(matches_how == MATCHED_RECURSIVELY_LEADING_PATHSPEC))
|
|
|
|
return path_none;
|
|
|
|
return excluded ? path_excluded : path_untracked;
|
|
|
|
}
|
2020-08-12 07:12:36 +00:00
|
|
|
}
|
2007-04-11 21:49:44 +00:00
|
|
|
|
2020-04-01 04:17:41 +00:00
|
|
|
if (!(dir->flags & DIR_SHOW_OTHER_DIRECTORIES)) {
|
2020-04-01 04:17:40 +00:00
|
|
|
if (excluded &&
|
|
|
|
(dir->flags & DIR_SHOW_IGNORED_TOO) &&
|
|
|
|
(dir->flags & DIR_SHOW_IGNORED_TOO_MODE_MATCHING)) {
|
status: add option to show ignored files differently
Teach the status command more flexibility in how ignored files are
reported. Currently, the reporting of ignored files and untracked
files is linked. You cannot control how ignored files are reported
independently of how untracked files are reported (i.e. `all` vs
`normal`). This makes it impossible to show untracked files with the
`all` option, but show ignored files with the `normal` option.
This work 1) adds the ability to control the reporting of ignored
files independently of untracked files and 2) introduces the concept
of status reporting ignored paths that explicitly match an ignored
pattern. There are 2 benefits to these changes: 1) if a consumer needs
all untracked files but not all ignored files, there is a performance
benefit to not scanning all contents of an ignored directory and 2)
returning ignored files that explicitly match a path allow a consumer
to make more informed decisions about when a status result might be
stale.
This commit implements --ignored=matching with --untracked-files=all.
The following commit will implement --ignored=matching with
--untracked=files=normal.
An example of where this flexibility could be useful is that our
application (Visual Studio) runs the status command and presents the
output. It shows all untracked files individually (e.g. using the
'--untracked-files=all' option), and would like to know about which
paths are ignored. It uses information about ignored paths to make
decisions about when the status result might have changed.
Additionally, many projects place build output into directories inside
a repository's working directory (e.g. in "bin/" and "obj/"
directories). Normal usage is to explicitly ignore these 2 directory
names in the .gitignore file (rather than or in addition to the *.obj
pattern).If an application could know that these directories are
explicitly ignored, it could infer that all contents are ignored as
well and make better informed decisions about files in these
directories. It could infer that any changes under these paths would
not affect the output of status. Additionally, there can be a
significant performance benefit by avoiding scanning through ignored
directories.
When status is set to report matching ignored files, it has the
following behavior. Ignored files and directories that explicitly
match an exclude pattern are reported. If an ignored directory matches
an exclude pattern, then the path of the directory is returned. If a
directory does not match an exclude pattern, but all of its contents
are ignored, then the contained files are reported instead of the
directory.
Signed-off-by: Jameson Miller <jamill@microsoft.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-10-30 17:21:37 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* This is an excluded directory and we are
|
|
|
|
* showing ignored paths that match an exclude
|
|
|
|
* pattern. (e.g. show directory as ignored
|
|
|
|
* only if it matches an exclude pattern).
|
|
|
|
* This path will either be 'path_excluded'
|
|
|
|
* (if we are showing empty directories or if
|
|
|
|
* the directory is not empty), or will be
|
|
|
|
* 'path_none' (empty directory, and we are
|
|
|
|
* not showing empty directories).
|
|
|
|
*/
|
|
|
|
if (!(dir->flags & DIR_HIDE_EMPTY_DIRECTORIES))
|
|
|
|
return path_excluded;
|
|
|
|
|
|
|
|
if (read_directory_recursive(dir, istate, dirname, len,
|
|
|
|
untracked, 1, 1, pathspec) == path_excluded)
|
|
|
|
return path_excluded;
|
|
|
|
|
|
|
|
return path_none;
|
|
|
|
}
|
2013-04-15 19:14:22 +00:00
	return path_recurse;
}
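To make the "maximum path_treatment" idea above concrete, here is a small self-contained sketch. The enum mirrors the one dir.c defines internally; the combine() helper is invented for illustration and is not a real dir.c function:

/* Mirrors dir.c's internal enum; the order matters: higher = more significant. */
enum path_treatment {
	path_none = 0,
	path_recurse,
	path_excluded,
	path_untracked
};

/*
 * Fold one entry's treatment into the directory's running state.
 * read_directory_recursive() effectively does this for every entry it
 * reads, so a single scan answers both "any ignored entries?" and
 * "any untracked entries?".
 */
static enum path_treatment combine(enum path_treatment dir_state,
				   enum path_treatment entry_state)
{
	return entry_state > dir_state ? entry_state : dir_state;
}

In the check_only case the caller can stop reading as soon as combine() yields path_untracked, since no later entry can raise the state any further.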
dir: avoid unnecessary traversal into ignored directory
The show_other_directories case in treat_directory() tried to handle
both excludes and untracked files with the same logic, and mishandled
both the excludes and the untracked files in the process, in different
ways. Split that logic apart, and then focus on the logic for the
excludes; a subsequent commit will address the logic for untracked
files.
For show_other_directories, an excluded directory means that
every path underneath that directory will also be excluded. Given that
the calling code requested to just show directories when everything
under a directory had the same state (that's what the
"DIR_SHOW_OTHER_DIRECTORIES" flag means), we generally do not need to
traverse into such directories and can just immediately mark them as
ignored (i.e. as path_excluded). The only reason we cannot just
immediately return path_excluded is the DIR_HIDE_EMPTY_DIRECTORIES flag
and the possibility that the ignored directory is an empty directory.
The code previously treated DIR_SHOW_IGNORED_TOO in most cases as an
exception as well, which was wrong. It can sometimes reduce the number
of cases where we need to recurse (namely if
DIR_SHOW_IGNORED_TOO_MODE_MATCHING is also set), but should not be able
to increase the number of cases where we need to recurse. Fix the logic
accordingly.
Some sidenotes about possible confusion with dir.c:
* "ignored" often refers to an untracked ignore", i.e. a file which is
not tracked which matches one of the ignore/exclusion rules. But you
can also have a "tracked ignore", a tracked file that happens to match
one of the ignore/exclusion rules and which dir.c has to worry about
since "git ls-files -c -i" is supposed to list them.
* The dir code often uses "ignored" and "excluded" interchangeably,
which you need to keep in mind while reading the code.
* "exclude" is used multiple ways in the code:
* As noted above, "exclude" is often a synonym for "ignored".
* The logic for parsing .gitignore files was re-used in
.git/info/sparse-checkout, except there it is used to mark paths that
the user wants to *keep*. This was mostly addressed by commit
65edd96aec ("treewide: rename 'exclude' methods to 'pattern'",
2019-09-03), but every once in a while you'll find a comment about
"exclude" referring to these patterns that might in fact be in use
by the sparse-checkout machinery for inclusion rules.
* The word "EXCLUDE" is also used for pathspec negation, as in
(pathspec->items[3].magic & PATHSPEC_EXCLUDE)
Thus if a user had a .gitignore file containing
*~
*.log
!settings.log
And then ran
git add -- 'settings.*' ':^settings.log'
Then :^settings.log is a pathspec negation making settings.log not
be requested to be added even though all other settings.* files are
being added. Also, !settings.log in the gitignore file is a negative
exclude pattern meaning that settings.log is normally a file we
want to track even though all other *.log files are ignored.
Sometimes it feels like dir.c needs its own glossary with its many
definitions, including the multiply-defined terms.
Reported-by: Jason Gore <Jason.Gore@microsoft.com>
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-05-12 17:28:19 +00:00
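Because the glossary above is easy to mix up in code, the following purely illustrative sketch contrasts the two unrelated senses of "exclude". The helper name is invented; it assumes the is_excluded() and PATHSPEC_EXCLUDE declarations from git's dir.h and pathspec.h, and reuses the settings.log example from the message above:

#include "git-compat-util.h"
#include "dir.h"
#include "pathspec.h"

/* hypothetical helper, for illustration only */
static void two_meanings_of_exclude(struct dir_struct *dir,
                                    struct index_state *istate,
                                    const struct pathspec *pathspec)
{
        int dtype = DT_UNKNOWN;
        int i;

        /* 1) "excluded" as in .gitignore: does an ignore rule match the path? */
        if (is_excluded(dir, istate, "settings.log", &dtype))
                printf("settings.log matches an exclude (ignore) pattern\n");

        /* 2) "EXCLUDE" as in pathspec negation: a ':^...' pathspec item */
        for (i = 0; i < pathspec->nr; i++)
                if (pathspec->items[i].magic & PATHSPEC_EXCLUDE)
                        printf("pathspec item %d is a negative (':^') pathspec\n", i);
}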
	assert(dir->flags & DIR_SHOW_OTHER_DIRECTORIES);
dir: replace exponential algorithm with a linear one
dir's read_directory_recursive() naturally operates recursively in order
to walk the directory tree. Treating of directories is sometimes weird
because there are so many different permutations about how to handle
directories. Some examples:
* 'git ls-files -o --directory' only needs to know that a directory
itself is untracked; it doesn't need to recurse into it to see what
is underneath.
* 'git status' needs to recurse into an untracked directory, but only
to determine whether or not it is empty. If there are no files
underneath, the directory itself will be omitted from the output.
If it is not empty, only the directory will be listed.
* 'git status --ignored' needs to recurse into untracked directories
and report all the ignored entries and then report the directory as
untracked -- UNLESS all the entries under the directory are
ignored, in which case we don't print any of the entries under the
directory and just report the directory itself as ignored. (Note
that although this forces us to walk all untracked files underneath
the directory as well, we strip them from the output, except for
users like 'git clean' who also set DIR_KEEP_TRACKED_CONTENTS.)
* For 'git clean', we may need to recurse into a directory that
doesn't match any specified pathspecs, if it's possible that there
is an entry underneath the directory that can match one of the
pathspecs. In such a case, we need to be careful to omit the
directory itself from the list of paths (see commit 404ebceda01c
("dir: also check directories for matching pathspecs", 2019-09-17))
Part of the tension noted above is that the treatment of a directory can
change based on the files within it, and based on the various settings
in dir->flags. Trying to keep this in mind while reading over the code,
it is easy to think in terms of "treat_directory() tells us what to do
with a directory, and read_directory_recursive() is the thing that
recurses". Since we need to look into a directory to know how to treat
it, though, it is quite easy to decide to (also) recurse into the
directory from treat_directory() by adding a read_directory_recursive()
call. Adding such a call is actually fine, IF we make sure that
read_directory_recursive() does not also recurse into that same
directory.
Unfortunately, commit df5bcdf83aeb ("dir: recurse into untracked dirs
for ignored files", 2017-05-18), added exactly such a case to the code,
meaning we'd have two calls to read_directory_recursive() for an
untracked directory. So, if we had a file named
one/two/three/four/five/somefile.txt
and nothing in one/ was tracked, then 'git status --ignored' would
call read_directory_recursive() twice on the directory 'one/', and
each of those would call read_directory_recursive() twice on the
directory 'one/two/', and so on until read_directory_recursive() was
called 2^5 times for 'one/two/three/four/five/'.
Avoid calling read_directory_recursive() twice per level by moving a
lot of the special logic into treat_directory().
Since dir.c is somewhat complex, extra cruft built up around this over
time. While trying to unravel it, I noticed several instances where the
first call to read_directory_recursive() would return e.g.
path_untracked for some directory and a later one would return e.g.
path_none, despite the fact that the directory clearly should have been
considered untracked. The code happened to work due to the side-effect
from the first invocation of adding untracked entries to dir->entries;
this allowed it to get the correct output despite the supposed override
in return value by the later call.
I am somewhat concerned that there are still bugs and maybe even
testcases with the wrong expectation. I have tried to carefully
document treat_directory() since it becomes more complex after this
change (though much of this complexity came from elsewhere that probably
deserved better comments to begin with). However, much of my work felt
more like a game of whackamole while attempting to make the code match
the existing regression tests than an attempt to create an
implementation that matched some clear design. That seems wrong to me,
but the rules of existing behavior had so many special cases that I had
a hard time coming up with some overarching rules about what correct
behavior is for all cases, forcing me to hope that the regression tests
are correct and sufficient. Such a hope seems likely to be ill-founded,
given my experience with dir.c-related testcases in the last few months:
Examples where the documentation was hard to parse or even just wrong:
* 3aca58045f4f (git-clean.txt: do not claim we will delete files with
-n/--dry-run, 2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
Examples where testcases were declared wrong and changed:
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* a2b13367fe55 (Revert "dir.c: make 'git-status --ignored' work within
leading directories", 2019-12-10)
Examples where testcases were clearly inadequate:
* 502c386ff944 (t7300-clean: demonstrate deleting nested repo with an
ignored file breakage, 2019-08-25)
* 7541cc530239 (t7300: add testcases showing failure to clean specified
pathspecs, 2019-09-17)
* a5e916c7453b (dir: fix off-by-one error in match_pathspec_item,
2019-09-17)
* 404ebceda01c (dir: also check directories for matching pathspecs,
2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* 452efd11fbf6 (t3011: demonstrate directory traversal failures,
2019-12-10)
* b9670c1f5e6b (dir: fix checks on common prefix directory, 2019-12-19)
Examples where "correct behavior" was unclear to everyone:
https://lore.kernel.org/git/20190905154735.29784-1-newren@gmail.com/
Other commits of note:
* 902b90cf42bc (clean: fix theoretical path corruption, 2019-09-17)
However, on the positive side, it does make the code much faster. For
the following simple shell loop in an empty repository:
for depth in $(seq 10 25)
do
dirs=$(for i in $(seq 1 $depth) ; do printf 'dir/' ; done)
rm -rf dir
mkdir -p $dirs
>$dirs/untracked-file
/usr/bin/time --format="$depth: %e" git status --ignored >/dev/null
done
I saw the following timings, in seconds (note that the numbers are a
little noisy from run-to-run, but the trend is very clear with every
run):
10: 0.03
11: 0.05
12: 0.08
13: 0.19
14: 0.29
15: 0.50
16: 1.05
17: 2.11
18: 4.11
19: 8.60
20: 17.55
21: 33.87
22: 68.71
23: 140.05
24: 274.45
25: 551.15
For the above run, using strace I can look for the number of untracked
directories opened and can verify that it matches the expected
2^($depth+1)-2 (the sum of 2^1 + 2^2 + 2^3 + ... + 2^$depth).
After this fix, with strace I can verify that the number of untracked
directories that are opened drops to just $depth, and the timings all
drop to 0.00. In fact, it isn't until a depth of 190 nested directories
that it sometimes starts reporting a time of 0.01 seconds and doesn't
consistently report 0.01 seconds until there are 240 nested directories.
The previous code would have taken
17.55 * 2^220 / (60*60*24*365) = 9.4 * 10^59 YEARS
to have completed the 240 nested directories case. It's not often
that you get to speed something up by a factor of 3*10^69.
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-01 04:17:42 +00:00
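The blowup described above can be modelled without any git code. The toy program below (plain C, written for this note, not taken from the patch) counts directory "opens" when every level is scanned twice versus once, reproducing the 2^(depth+1)-2 versus depth counts that the strace measurements confirm:

#include <stdio.h>

static long opens;

/* Each call models read_directory_recursive() entering one directory level. */
static void scan(int depth, int scans_per_level)
{
        int i;

        if (!depth)
                return;
        for (i = 0; i < scans_per_level; i++) {
                opens++;                /* one opendir() at this level */
                scan(depth - 1, scans_per_level);
        }
}

int main(void)
{
        int depth;

        for (depth = 1; depth <= 20; depth++) {
                long exponential, linear;

                opens = 0;
                scan(depth, 2);         /* old code: two scans per level */
                exponential = opens;

                opens = 0;
                scan(depth, 1);         /* fixed code: one scan per level */
                linear = opens;

                printf("depth %2d: %8ld opens before, %2ld after\n",
                       depth, exponential, linear);
        }
        return 0;
}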
	/*
	 * If we have a pathspec which could match something _below_ this
	 * directory (e.g. when checking 'subdir/' having a pathspec like
	 * 'subdir/some/deep/path/file' or 'subdir/widget-*.c'), then we
	 * need to recurse.
	 */
	if (matches_how == MATCHED_RECURSIVELY_LEADING_PATHSPEC)
		return path_recurse;
	/* Special cases for where this directory is excluded/ignored */
	if (excluded) {
		/*
		 * If DIR_SHOW_OTHER_DIRECTORIES is set and we're not
		 * hiding empty directories, there is no need to
		 * recurse into an ignored directory.
		 */
		if (!(dir->flags & DIR_HIDE_EMPTY_DIRECTORIES))
			return path_excluded;

		/*
		 * Even if we are hiding empty directories, we can still avoid
		 * recursing into ignored directories for DIR_SHOW_IGNORED_TOO
		 * if DIR_SHOW_IGNORED_TOO_MODE_MATCHING is also set.
		 */
		if ((dir->flags & DIR_SHOW_IGNORED_TOO) &&
		    (dir->flags & DIR_SHOW_IGNORED_TOO_MODE_MATCHING))
			return path_excluded;
	}
	/*
	 * Other than the path_recurse case above, we only need to
	 * recurse into untracked directories if any of the following
	 * bits is set:
	 *   - DIR_SHOW_IGNORED (because then we need to determine if
	 *                       there are ignored entries below)
	 *   - DIR_SHOW_IGNORED_TOO (same as above)
	 *   - DIR_HIDE_EMPTY_DIRECTORIES (because we have to determine if
	 *                                 the directory is empty)
	 */
	if (!excluded &&
	    !(dir->flags & (DIR_SHOW_IGNORED |
			    DIR_SHOW_IGNORED_TOO |
|
|
|
DIR_HIDE_EMPTY_DIRECTORIES))) {
|
|
|
|
return path_untracked;
|
|
|
|
}
|
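To make the excluded-directory rule described above concrete, here is a
condensed sketch of the decision. The helper name is hypothetical and the
body is simplified; it assumes the enum path_treatment values and DIR_*
flags already defined in this file and in dir.h, and is not the verbatim
dir.c code.

    static enum path_treatment treat_ignored_dir_sketch(struct dir_struct *dir)
    {
            /*
             * Everything under an ignored directory is ignored too, so
             * normally there is nothing to learn by recursing into it.
             */
            if (!(dir->flags & DIR_HIDE_EMPTY_DIRECTORIES))
                    return path_excluded;

            /*
             * Per the commit message above, DIR_SHOW_IGNORED_TOO_MODE_MATCHING
             * reports the matching (ignored) directory itself, so recursing
             * can be skipped here as well.
             */
            if ((dir->flags & DIR_SHOW_IGNORED_TOO) &&
                (dir->flags & DIR_SHOW_IGNORED_TOO_MODE_MATCHING))
                    return path_excluded;

            /* Otherwise we must look inside to tell empty from non-empty. */
            return path_recurse;
    }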
dir: replace exponential algorithm with a linear one
dir's read_directory_recursive() naturally operates recursively in order
to walk the directory tree. The treatment of directories is sometimes
weird because there are so many different permutations of how to handle
directories. Some examples:
* 'git ls-files -o --directory' only needs to know that a directory
itself is untracked; it doesn't need to recurse into it to see what
is underneath.
* 'git status' needs to recurse into an untracked directory, but only
to determine whether or not it is empty. If there are no files
underneath, the directory itself will be omitted from the output.
If it is not empty, only the directory will be listed.
* 'git status --ignored' needs to recurse into untracked directories
and report all the ignored entries and then report the directory as
untracked -- UNLESS all the entries under the directory are
ignored, in which case we don't print any of the entries under the
directory and just report the directory itself as ignored. (Note
that although this forces us to walk all untracked files underneath
the directory as well, we strip them from the output, except for
users like 'git clean' who also set DIR_KEEP_UNTRACKED_CONTENTS.)
* For 'git clean', we may need to recurse into a directory that
doesn't match any specified pathspecs, if it's possible that there
is an entry underneath the directory that can match one of the
pathspecs. In such a case, we need to be careful to omit the
directory itself from the list of paths (see commit 404ebceda01c
("dir: also check directories for matching pathspecs", 2019-09-17))
Part of the tension noted above is that the treatment of a directory can
change based on the files within it, and based on the various settings
in dir->flags. Trying to keep this in mind while reading over the code,
it is easy to think in terms of "treat_directory() tells us what to do
with a directory, and read_directory_recursive() is the thing that
recurses". Since we need to look into a directory to know how to treat
it, though, it is quite easy to decide to (also) recurse into the
directory from treat_directory() by adding a read_directory_recursive()
call. Adding such a call is actually fine, IF we make sure that
read_directory_recursive() does not also recurse into that same
directory.
Unfortunately, commit df5bcdf83aeb ("dir: recurse into untracked dirs
for ignored files", 2017-05-18), added exactly such a case to the code,
meaning we'd have two calls to read_directory_recursive() for an
untracked directory. So, if we had a file named
one/two/three/four/five/somefile.txt
and nothing in one/ was tracked, then 'git status --ignored' would
call read_directory_recursive() twice on the directory 'one/', and
each of those would call read_directory_recursive() twice on the
directory 'one/two/', and so on until read_directory_recursive() was
called 2^5 times for 'one/two/three/four/five/'.
Avoid calling read_directory_recursive() twice per level by moving a
lot of the special logic into treat_directory().
Since dir.c is somewhat complex, extra cruft built up around this over
time. While trying to unravel it, I noticed several instances where the
first call to read_directory_recursive() would return e.g.
path_untracked for some directory and a later one would return e.g.
path_none, despite the fact that the directory clearly should have been
considered untracked. The code happened to work due to the side-effect
from the first invocation of adding untracked entries to dir->entries;
this allowed it to get the correct output despite the supposed override
in return value by the later call.
I am somewhat concerned that there are still bugs and maybe even
testcases with the wrong expectation. I have tried to carefully
document treat_directory() since it becomes more complex after this
change (though much of this complexity came from elsewhere that probably
deserved better comments to begin with). However, much of my work felt
more like a game of whack-a-mole while attempting to make the code match
the existing regression tests than an attempt to create an
implementation that matched some clear design. That seems wrong to me,
but the rules of existing behavior had so many special cases that I had
a hard time coming up with some overarching rules about what correct
behavior is for all cases, forcing me to hope that the regression tests
are correct and sufficient. Such a hope seems likely to be ill-founded,
given my experience with dir.c-related testcases in the last few months:
Examples where the documentation was hard to parse or even just wrong:
* 3aca58045f4f (git-clean.txt: do not claim we will delete files with
-n/--dry-run, 2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
Examples where testcases were declared wrong and changed:
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* a2b13367fe55 (Revert "dir.c: make 'git-status --ignored' work within
leading directories", 2019-12-10)
Examples where testcases were clearly inadequate:
* 502c386ff944 (t7300-clean: demonstrate deleting nested repo with an
ignored file breakage, 2019-08-25)
* 7541cc530239 (t7300: add testcases showing failure to clean specified
pathspecs, 2019-09-17)
* a5e916c7453b (dir: fix off-by-one error in match_pathspec_item,
2019-09-17)
* 404ebceda01c (dir: also check directories for matching pathspecs,
2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* 452efd11fbf6 (t3011: demonstrate directory traversal failures,
2019-12-10)
* b9670c1f5e6b (dir: fix checks on common prefix directory, 2019-12-19)
Examples where "correct behavior" was unclear to everyone:
https://lore.kernel.org/git/20190905154735.29784-1-newren@gmail.com/
Other commits of note:
* 902b90cf42bc (clean: fix theoretical path corruption, 2019-09-17)
However, on the positive side, it does make the code much faster. For
the following simple shell loop in an empty repository:
for depth in $(seq 10 25)
do
dirs=$(for i in $(seq 1 $depth) ; do printf 'dir/' ; done)
rm -rf dir
mkdir -p $dirs
>$dirs/untracked-file
/usr/bin/time --format="$depth: %e" git status --ignored >/dev/null
done
I saw the following timings, in seconds (note that the numbers are a
little noisy from run-to-run, but the trend is very clear with every
run):
10: 0.03
11: 0.05
12: 0.08
13: 0.19
14: 0.29
15: 0.50
16: 1.05
17: 2.11
18: 4.11
19: 8.60
20: 17.55
21: 33.87
22: 68.71
23: 140.05
24: 274.45
25: 551.15
For the above run, using strace I can look for the number of untracked
directories opened and can verify that it matches the expected
2^($depth+1)-2 (the sum of 2^1 + 2^2 + 2^3 + ... + 2^$depth).
After this fix, with strace I can verify that the number of untracked
directories that are opened drops to just $depth, and the timings all
drop to 0.00. In fact, it isn't until a depth of 190 nested directories
that it sometimes starts reporting a time of 0.01 seconds and doesn't
consistently report 0.01 seconds until there are 240 nested directories.
The previous code would have taken
17.55 * 2^220 / (60*60*24*365) = 9.4 * 10^59 YEARS
to have completed the 240 nested directories case. It's not often
that you get to speed something up by a factor of 3*10^69.
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-01 04:17:42 +00:00
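For reference, the directory-open count and the run-time extrapolation
quoted in the message above work out as:

    \sum_{k=1}^{d} 2^k = 2^{d+1} - 2,
    \qquad
    \frac{17.55 \cdot 2^{220} \text{ s}}{60 \cdot 60 \cdot 24 \cdot 365 \text{ s/yr}} \approx 9.4 \times 10^{59} \text{ years}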
|
|
|
|
|
|
|
/*
|
2020-06-11 06:59:30 +00:00
|
|
|
* Even if we don't want to know all the paths under an untracked or
|
|
|
|
* ignored directory, we may still need to go into the directory to
|
|
|
|
* determine if it is empty (because with DIR_HIDE_EMPTY_DIRECTORIES,
|
|
|
|
* an empty directory should be path_none instead of path_excluded or
|
|
|
|
* path_untracked).
|
dir: replace exponential algorithm with a linear one
2020-04-01 04:17:42 +00:00
|
|
|
*/
|
|
|
|
check_only = ((dir->flags & DIR_HIDE_EMPTY_DIRECTORIES) &&
|
|
|
|
!(dir->flags & DIR_SHOW_IGNORED_TOO));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* However, there's another optimization possible as a subset of
|
|
|
|
* check_only, based on the cases we have to consider:
|
|
|
|
* A) Directory matches no exclude patterns:
|
|
|
|
* * Directory is empty => path_none
|
|
|
|
* * Directory has an untracked file under it => path_untracked
|
|
|
|
* * Directory has only ignored files under it => path_excluded
|
|
|
|
* B) Directory matches an exclude pattern:
|
|
|
|
* * Directory is empty => path_none
|
|
|
|
* * Directory has an untracked file under it => path_excluded
|
|
|
|
* * Directory has only ignored files under it => path_excluded
|
|
|
|
* In case A, we can exit as soon as we've found an untracked
|
|
|
|
* file but otherwise have to walk all files. In case B, though,
|
|
|
|
* we can stop at the first file we find under the directory.
|
|
|
|
*/
|
|
|
|
stop_early = check_only && excluded;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If /every/ file within an untracked directory is ignored, then
|
|
|
|
* we want to treat the directory as ignored (for e.g. status
|
|
|
|
* --porcelain), without listing the individual ignored files
|
|
|
|
* underneath. To do so, we'll save the current ignored_nr, and
|
|
|
|
* pop all the ones added after it if it turns out the entire
|
2020-04-01 04:17:43 +00:00
|
|
|
* directory is ignored. Also, when DIR_SHOW_IGNORED_TOO and
|
|
|
|
* !DIR_KEEP_UNTRACKED_CONTENTS then we don't want to show
|
|
|
|
* untracked paths so will need to pop all those off the list
|
|
|
|
* after we traverse.
|
dir: replace exponential algorithm with a linear one
2020-04-01 04:17:42 +00:00
|
|
|
*/
|
|
|
|
old_ignored_nr = dir->ignored_nr;
|
2020-04-01 04:17:43 +00:00
|
|
|
old_untracked_nr = dir->nr;
|
dir: replace exponential algorithm with a linear one
2020-04-01 04:17:42 +00:00
|
|
|
|
|
|
|
/* Actually recurse into dirname now, we'll fixup the state later. */
|
2015-08-19 13:01:25 +00:00
|
|
|
untracked = lookup_untracked(dir->untracked, untracked,
|
|
|
|
dirname + baselen, len - baselen);
|
dir: replace exponential algorithm with a linear one
2020-04-01 04:17:42 +00:00
|
|
|
state = read_directory_recursive(dir, istate, dirname, len, untracked,
|
|
|
|
check_only, stop_early, pathspec);
|
|
|
|
|
|
|
|
/* There are a variety of reasons we may need to fixup the state... */
|
|
|
|
if (state == path_excluded) {
|
|
|
|
/* state == path_excluded implies all paths under
|
|
|
|
* dirname were ignored...
|
|
|
|
*
|
|
|
|
* if running e.g. `git status --porcelain --ignored=matching`,
|
|
|
|
* then we want to see the subpaths that are ignored.
|
|
|
|
*
|
|
|
|
* if running e.g. just `git status --porcelain`, then
|
|
|
|
* we just want the directory itself to be listed as ignored
|
|
|
|
* and not the individual paths underneath.
|
|
|
|
*/
|
|
|
|
int want_ignored_subpaths =
|
|
|
|
((dir->flags & DIR_SHOW_IGNORED_TOO) &&
|
|
|
|
(dir->flags & DIR_SHOW_IGNORED_TOO_MODE_MATCHING));
|
|
|
|
|
|
|
|
if (want_ignored_subpaths) {
|
|
|
|
/*
|
|
|
|
* with --ignored=matching, we want the subpaths
|
|
|
|
* INSTEAD of the directory itself.
|
|
|
|
*/
|
|
|
|
state = path_none;
|
|
|
|
} else {
|
|
|
|
int i;
|
|
|
|
for (i = old_ignored_nr + 1; i<dir->ignored_nr; ++i)
|
|
|
|
FREE_AND_NULL(dir->ignored[i]);
|
|
|
|
dir->ignored_nr = old_ignored_nr;
|
|
|
|
}
|
|
|
|
}
|
2017-09-18 17:24:33 +00:00
|
|
|
|
|
|
|
/*
|
2020-04-01 04:17:43 +00:00
|
|
|
* We may need to ignore some of the untracked paths we found while
|
|
|
|
* traversing subdirectories.
|
2017-09-18 17:24:33 +00:00
|
|
|
*/
|
2020-04-01 04:17:43 +00:00
|
|
|
if ((dir->flags & DIR_SHOW_IGNORED_TOO) &&
|
|
|
|
!(dir->flags & DIR_KEEP_UNTRACKED_CONTENTS)) {
|
|
|
|
int i;
|
|
|
|
for (i = old_untracked_nr + 1; i<dir->nr; ++i)
|
|
|
|
FREE_AND_NULL(dir->entries[i]);
|
|
|
|
dir->nr = old_untracked_nr;
|
|
|
|
}
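Both pruning blocks above use the same save-and-truncate pattern: record
the array length before recursing, then free and drop whatever was appended
past that mark if it turns out to be unwanted. A self-contained illustration
with hypothetical types and names follows (the real code works on the
dir->ignored and dir->entries arrays of struct dir_entry pointers, using
FREE_AND_NULL()):

    #include <stdlib.h>

    struct entry_list {
            char **items;   /* heap-allocated strings */
            size_t nr;      /* number of valid entries */
    };

    static void truncate_entries(struct entry_list *list, size_t old_nr)
    {
            size_t i;

            /* Free everything appended after old_nr and roll the length back. */
            for (i = old_nr; i < list->nr; i++) {
                    free(list->items[i]);
                    list->items[i] = NULL;
            }
            list->nr = old_nr;
    }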
|
|
|
|
|
2017-09-18 17:24:33 +00:00
|
|
|
/*
|
dir: replace exponential algorithm with a linear one
2020-04-01 04:17:42 +00:00
|
|
|
* If there is nothing under the current directory and we are not
|
|
|
|
* hiding empty directories, then we need to report on the
|
|
|
|
* untracked or ignored status of the directory itself.
|
2017-09-18 17:24:33 +00:00
|
|
|
*/
|
dir: replace exponential algorithm with a linear one
2020-04-01 04:17:42 +00:00
|
|
|
if (state == path_none && !(dir->flags & DIR_HIDE_EMPTY_DIRECTORIES))
|
|
|
|
state = excluded ? path_excluded : path_untracked;
|
|
|
|
|
|
|
|
return state;
|
2006-05-17 02:02:14 +00:00
|
|
|
}
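The log message above describes how calling read_directory_recursive() a
second time per directory level turns the traversal into 2^depth work.
As a standalone illustration only (this is not dir.c code; scan_twice()
and scan_once() are made-up names), counting calls over a chain of
nested directories shows the blow-up and why one recursion per level
stays linear:

    #include <stdio.h>

    static unsigned long calls;

    static void scan_twice(int depth)
    {
    	calls++;
    	if (!depth)
    		return;
    	scan_twice(depth - 1);	/* first pass: "is anything interesting in here?" */
    	scan_twice(depth - 1);	/* second pass: actually collect the entries */
    }

    static void scan_once(int depth)
    {
    	calls++;
    	if (!depth)
    		return;
    	scan_once(depth - 1);	/* a single pass both decides and collects */
    }

    int main(void)
    {
    	calls = 0;
    	scan_twice(20);
    	printf("two recursive calls per level, depth 20: %lu calls\n", calls); /* 2^21 - 1 */
    	calls = 0;
    	scan_once(20);
    	printf("one recursive call per level, depth 20:  %lu calls\n", calls); /* 21 */
    	return 0;
    }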
|
|
|
|
|
Optimize directory listing with pathspec limiter.
The way things are set up, you can now pass a "pathspec" to the
"read_directory()" function. If you pass NULL, it acts exactly
like it used to do (read everything). If you pass a non-NULL
pointer, it will simplify it into a "these are the prefixes
without any special characters", and stop any readdir() early if
the path in question doesn't match any of the prefixes.
NOTE! This does *not* obviate the need for the caller to do the *exact*
pathspec match later. It's a first-level filter on "read_directory()", but
it does not do the full pathspec thing. Maybe it should. But in the
meantime, builtin-add.c really does need to do first
read_directory(dir, .., pathspec);
if (pathspec)
prune_directory(dir, pathspec, baselen);
ie the "prune_directory()" part will do the *exact* pathspec pruning,
while the "read_directory()" will use the pathspec just to do some quick
high-level pruning of the directories it will recurse into.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-03-31 03:39:30 +00:00
|
|
|
/*
|
|
|
|
* This is an inexact early pruning of any recursive directory
|
|
|
|
* reading - if the path cannot possibly be in the pathspec,
|
|
|
|
* return true, and we'll skip it early.
|
|
|
|
*/
|
2017-01-04 18:03:57 +00:00
|
|
|
static int simplify_away(const char *path, int pathlen,
|
|
|
|
const struct pathspec *pathspec)
|
Optimize directory listing with pathspec limiter.
The way things are set up, you can now pass a "pathspec" to the
"read_directory()" function. If you pass NULL, it acts exactly
like it used to do (read everything). If you pass a non-NULL
pointer, it will simplify it into a "these are the prefixes
without any special characters", and stop any readdir() early if
the path in question doesn't match any of the prefixes.
NOTE! This does *not* obviate the need for the caller to do the *exact*
pathspec match later. It's a first-level filter on "read_directory()", but
it does not do the full pathspec thing. Maybe it should. But in the
meantime, builtin-add.c really does need to do first
read_directory(dir, .., pathspec);
if (pathspec)
prune_directory(dir, pathspec, baselen);
ie the "prune_directory()" part will do the *exact* pathspec pruning,
while the "read_directory()" will use the pathspec just to do some quick
high-level pruning of the directories it will recurse into.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-03-31 03:39:30 +00:00
|
|
|
{
|
2017-01-04 18:03:57 +00:00
|
|
|
int i;
|
Optimize directory listing with pathspec limiter.
The way things are set up, you can now pass a "pathspec" to the
"read_directory()" function. If you pass NULL, it acts exactly
like it used to do (read everything). If you pass a non-NULL
pointer, it will simplify it into a "these are the prefixes
without any special characters", and stop any readdir() early if
the path in question doesn't match any of the prefixes.
NOTE! This does *not* obviate the need for the caller to do the *exact*
pathspec match later. It's a first-level filter on "read_directory()", but
it does not do the full pathspec thing. Maybe it should. But in the
meantime, builtin-add.c really does need to do first
read_directory(dir, .., pathspec);
if (pathspec)
prune_directory(dir, pathspec, baselen);
ie the "prune_directory()" part will do the *exact* pathspec pruning,
while the "read_directory()" will use the pathspec just to do some quick
high-level pruning of the directories it will recurse into.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-03-31 03:39:30 +00:00
|
|
|
|
2017-01-04 18:03:57 +00:00
|
|
|
if (!pathspec || !pathspec->nr)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
GUARD_PATHSPEC(pathspec,
|
|
|
|
PATHSPEC_FROMTOP |
|
|
|
|
PATHSPEC_MAXDEPTH |
|
|
|
|
PATHSPEC_LITERAL |
|
|
|
|
PATHSPEC_GLOB |
|
|
|
|
PATHSPEC_ICASE |
|
2017-03-13 18:23:21 +00:00
|
|
|
PATHSPEC_EXCLUDE |
|
|
|
|
PATHSPEC_ATTR);
|
2017-01-04 18:03:57 +00:00
|
|
|
|
|
|
|
for (i = 0; i < pathspec->nr; i++) {
|
|
|
|
const struct pathspec_item *item = &pathspec->items[i];
|
|
|
|
int len = item->nowildcard_len;
|
|
|
|
|
|
|
|
if (len > pathlen)
|
|
|
|
len = pathlen;
|
|
|
|
if (!ps_strncmp(item, item->match, path, len))
|
|
|
|
return 0;
|
Optimize directory listing with pathspec limiter.
The way things are set up, you can now pass a "pathspec" to the
"read_directory()" function. If you pass NULL, it acts exactly
like it used to do (read everything). If you pass a non-NULL
pointer, it will simplify it into a "these are the prefixes
without any special characters", and stop any readdir() early if
the path in question doesn't match any of the prefixes.
NOTE! This does *not* obviate the need for the caller to do the *exact*
pathspec match later. It's a first-level filter on "read_directory()", but
it does not do the full pathspec thing. Maybe it should. But in the
meantime, builtin-add.c really does need to do first
read_directory(dir, .., pathspec);
if (pathspec)
prune_directory(dir, pathspec, baselen);
ie the "prune_directory()" part will do the *exact* pathspec pruning,
while the "read_directory()" will use the pathspec just to do some quick
high-level pruning of the directories it will recurse into.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-03-31 03:39:30 +00:00
|
|
|
}
|
2017-01-04 18:03:57 +00:00
|
|
|
|
|
|
|
return 1;
|
Optimize directory listing with pathspec limiter.
The way things are set up, you can now pass a "pathspec" to the
"read_directory()" function. If you pass NULL, it acts exactly
like it used to do (read everything). If you pass a non-NULL
pointer, it will simplify it into a "these are the prefixes
without any special characters", and stop any readdir() early if
the path in question doesn't match any of the prefixes.
NOTE! This does *not* obviate the need for the caller to do the *exact*
pathspec match later. It's a first-level filter on "read_directory()", but
it does not do the full pathspec thing. Maybe it should. But in the
meantime, builtin-add.c really does need to do first
read_directory(dir, .., pathspec);
if (pathspec)
prune_directory(dir, pathspec, baselen);
ie the "prune_directory()" part will do the *exact* pathspec pruning,
while the "read_directory()" will use the pathspec just to do some quick
high-level pruning of the directories it will recurse into.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-03-31 03:39:30 +00:00
|
|
|
}
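As a rough standalone sketch of the same pruning idea (simplified:
plain prefix strings stand in for struct pathspec, so ps_strncmp() and
all pathspec magic are left out; can_skip_early() is a made-up name),
a path may be skipped early only if it fails to share a prefix with
every pathspec element:

    #include <stdio.h>
    #include <string.h>

    static int can_skip_early(const char *path, size_t pathlen,
    			  const char *const *prefixes, size_t nr)
    {
    	size_t i;

    	if (!nr)
    		return 0;	/* no pathspec: never skip anything */
    	for (i = 0; i < nr; i++) {
    		size_t len = strlen(prefixes[i]);

    		if (len > pathlen)
    			len = pathlen;
    		if (!strncmp(prefixes[i], path, len))
    			return 0;	/* still possibly relevant, keep it */
    	}
    	return 1;		/* cannot match any pathspec prefix */
    }

    int main(void)
    {
    	const char *prefixes[] = { "src/", "Documentation/" };

    	printf("%d\n", can_skip_early("src/dir.c", 9, prefixes, 2));   /* 0: keep */
    	printf("%d\n", can_skip_early("t/t7300.sh", 10, prefixes, 2)); /* 1: skip */
    	return 0;
    }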
|
|
|
|
|
2010-03-11 07:15:43 +00:00
|
|
|
/*
|
|
|
|
* This function tells us whether an excluded path matches a
|
|
|
|
* list of "interesting" pathspecs. That is, whether a path matched
|
|
|
|
* by any of the pathspecs could possibly be ignored by excluding
|
|
|
|
* the specified path. This can happen if:
|
|
|
|
*
|
|
|
|
* 1. the path is mentioned explicitly in the pathspec
|
|
|
|
*
|
|
|
|
* 2. the path is a directory prefix of some element in the
|
|
|
|
* pathspec
|
|
|
|
*/
|
2017-01-04 18:03:57 +00:00
|
|
|
static int exclude_matches_pathspec(const char *path, int pathlen,
|
|
|
|
const struct pathspec *pathspec)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!pathspec || !pathspec->nr)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
GUARD_PATHSPEC(pathspec,
|
|
|
|
PATHSPEC_FROMTOP |
|
|
|
|
PATHSPEC_MAXDEPTH |
|
|
|
|
PATHSPEC_LITERAL |
|
|
|
|
PATHSPEC_GLOB |
|
|
|
|
PATHSPEC_ICASE |
|
attr: enable attr pathspec magic for git-add and git-stash
Allow users to limit or exclude files based on file attributes
during git-add and git-stash.
For example, the chromium project would like to use
$ git add . ':(exclude,attr:submodule)'
as submodules are managed by an external tool, which forbids end users
from recording changes with "git add". Allowing "git add" to do so
would often record changes that users do not want in their commits.
This commit does not change any attr magic implementation. It is
only adding attr as an allowed pathspec in git-add and git-stash,
which was previously blocked by GUARD_PATHSPEC and a pathspec mask
in parse_pathspec().
However, we fix a bug in prefix_magic() where attr values were
unintentionally removed. This was triggerable when parse_pathspec()
is called with PATHSPEC_PREFIX_ORIGIN as a flag, which was the case
for git-stash (Bug originally filed here [*])
Furthermore, while other commands hit this code path, it did not
result in unexpected behavior because this bug only impacts the
pathspec->items->original field which is NOT used to filter
paths. However, git-stash does use pathspec->items->original when
building args used to call other git commands. (See add_pathspecs()
usage and implementation in stash.c)
It is possible that when the attr pathspec feature was first added
in b0db704652 (pathspec: allow querying for attributes, 2017-03-13),
"PATHSPEC_ATTR" was just unintentionally left out of a few
GUARD_PATHSPEC() invocations.
Later, to get a more user-friendly error message when attr was used
with git-add, PATHSPEC_ATTR was added as a mask to git-add's
invocation of parse_pathspec() in 84d938b732 (add: do not accept
pathspec magic 'attr', 2018-09-18). However, this user-friendly
error message was never added for git-stash.
[Reference]
* https://lore.kernel.org/git/CAMmZTi-0QKtj7Q=sbC5qhipGsQxJFOY-Qkk1jfkRYwfF5FcUVg@mail.gmail.com/
Signed-off-by: Joanna Wang <jojwang@google.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2023-11-03 16:34:48 +00:00
|
|
|
PATHSPEC_EXCLUDE |
|
|
|
|
PATHSPEC_ATTR);
|
2017-01-04 18:03:57 +00:00
|
|
|
|
|
|
|
for (i = 0; i < pathspec->nr; i++) {
|
|
|
|
const struct pathspec_item *item = &pathspec->items[i];
|
|
|
|
int len = item->nowildcard_len;
|
|
|
|
|
|
|
|
if (len == pathlen &&
|
|
|
|
!ps_strncmp(item, item->match, path, pathlen))
|
|
|
|
return 1;
|
|
|
|
if (len > pathlen &&
|
|
|
|
item->match[pathlen] == '/' &&
|
|
|
|
!ps_strncmp(item, item->match, path, pathlen))
|
|
|
|
return 1;
|
builtin-add: simplify (and increase accuracy of) exclude handling
Previously, the code would always set up the excludes, and then manually
pick through the pathspec we were given, assuming that non-added but
existing paths were just ignored. This was mostly correct, but would
erroneously mark a totally empty directory as 'ignored'.
Instead, we now use the collect_ignored option of dir_struct, which
unambiguously tells us whether a path was ignored. This simplifies the
code, and means empty directories are now just not mentioned at all.
Furthermore, we now conditionally ask dir_struct to respect excludes,
depending on whether the '-f' flag has been set. This means we don't have
to pick through the result, checking for an 'ignored' flag; ignored entries
were either added or not in the first place.
We can safely get rid of the special 'ignored' flags to dir_entry, which
were not used anywhere else.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Jonas Fonseca <fonseca@diku.dk>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2007-06-12 21:42:14 +00:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
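To make the two cases in the comment above concrete, here is a
simplified standalone sketch (plain strings stand in for struct
pathspec, so there is no ps_strncmp(), magic, or case-folding;
excluded_path_is_interesting() is a made-up name):

    #include <stdio.h>
    #include <string.h>

    static int excluded_path_is_interesting(const char *path,
    					 const char *const *pathspec, size_t nr)
    {
    	size_t pathlen = strlen(path), i;

    	for (i = 0; i < nr; i++) {
    		size_t len = strlen(pathspec[i]);

    		if (len == pathlen && !strcmp(pathspec[i], path))
    			return 1;	/* case 1: mentioned explicitly */
    		if (len > pathlen && pathspec[i][pathlen] == '/' &&
    		    !strncmp(pathspec[i], path, pathlen))
    			return 1;	/* case 2: directory prefix of a pathspec */
    	}
    	return 0;
    }

    int main(void)
    {
    	const char *spec[] = { "build/output/app.o" };

    	printf("%d\n", excluded_path_is_interesting("build/output/app.o", spec, 1)); /* 1 */
    	printf("%d\n", excluded_path_is_interesting("build", spec, 1));              /* 1 */
    	printf("%d\n", excluded_path_is_interesting("builder", spec, 1));            /* 0 */
    	return 0;
    }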
|
|
|
|
|
2017-05-05 19:53:24 +00:00
|
|
|
static int get_index_dtype(struct index_state *istate,
|
|
|
|
const char *path, int len)
|
2009-07-09 20:14:28 +00:00
|
|
|
{
|
|
|
|
int pos;
|
Convert "struct cache_entry *" to "const ..." wherever possible
I attempted to make index_state->cache[] a "const struct cache_entry **"
to find out how existing entries in index are modified and where. The
question I have is what do we do if we really need to keep track of on-disk
changes in the index. The result is
- diff-lib.c: setting CE_UPTODATE
- name-hash.c: setting CE_HASHED
- preload-index.c, read-cache.c, unpack-trees.c and
builtin/update-index: obvious
- entry.c: write_entry() may refresh the checked out entry via
fill_stat_cache_info(). This causes "non-const struct cache_entry
*" in builtin/apply.c, builtin/checkout-index.c and
builtin/checkout.c
- builtin/ls-files.c: --with-tree changes stagemask and may set
CE_UPDATE
Of these, write_entry() and its call sites are probably most
interesting because it modifies on-disk info. But this is stat info
and can be retrieved via refresh, at least for porcelain
commands. Others just use ce_flags for local purposes.
So, keeping track of "dirty" entries is just a matter of setting a
flag in index modification functions exposed by read-cache.c. Except
unpack-trees, the rest of the code base does not do anything funny
behind read-cache's back.
The actual patch is less valuable than the summary above. But if
anyone wants to re-identify the above sites, applying this patch and
then this:
diff --git a/cache.h b/cache.h
index 430d021..1692891 100644
--- a/cache.h
+++ b/cache.h
@@ -267,7 +267,7 @@ static inline unsigned int canon_mode(unsigned int mode)
#define cache_entry_size(len) (offsetof(struct cache_entry,name) + (len) + 1)
struct index_state {
- struct cache_entry **cache;
+ const struct cache_entry **cache;
unsigned int version;
unsigned int cache_nr, cache_alloc, cache_changed;
struct string_list *resolve_undo;
will help quickly identify them without bogus warnings.
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-07-09 15:29:00 +00:00
|
|
|
const struct cache_entry *ce;
|
2009-07-09 20:14:28 +00:00
|
|
|
|
2017-05-05 19:53:24 +00:00
|
|
|
ce = index_file_exists(istate, path, len, 0);
|
2009-07-09 20:14:28 +00:00
|
|
|
if (ce) {
|
|
|
|
if (!ce_uptodate(ce))
|
|
|
|
return DT_UNKNOWN;
|
|
|
|
if (S_ISGITLINK(ce->ce_mode))
|
|
|
|
return DT_DIR;
|
|
|
|
/*
|
|
|
|
* Nobody actually cares about the
|
|
|
|
* difference between DT_LNK and DT_REG
|
|
|
|
*/
|
|
|
|
return DT_REG;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Try to look it up as a directory */
|
2017-05-05 19:53:24 +00:00
|
|
|
pos = index_name_pos(istate, path, len);
|
2009-07-09 20:14:28 +00:00
|
|
|
if (pos >= 0)
|
|
|
|
return DT_UNKNOWN;
|
|
|
|
pos = -pos-1;
|
2017-05-05 19:53:24 +00:00
|
|
|
while (pos < istate->cache_nr) {
|
|
|
|
ce = istate->cache[pos++];
|
2009-07-09 20:14:28 +00:00
|
|
|
if (strncmp(ce->name, path, len))
|
|
|
|
break;
|
|
|
|
if (ce->name[len] > '/')
|
|
|
|
break;
|
|
|
|
if (ce->name[len] < '/')
|
|
|
|
continue;
|
|
|
|
if (!ce_uptodate(ce))
|
|
|
|
break; /* continue? */
|
|
|
|
return DT_DIR;
|
|
|
|
}
|
|
|
|
return DT_UNKNOWN;
|
|
|
|
}
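The "try to look it up as a directory" step above can be illustrated in
isolation (a sorted string array stands in for the index, and
looks_like_index_dir() is a made-up name): find the insertion point for
"path", then check whether the entries that follow live under "path/".

    #include <stdio.h>
    #include <string.h>

    static int looks_like_index_dir(const char *const *paths, size_t nr,
    				const char *path)
    {
    	size_t len = strlen(path);
    	size_t lo = 0, hi = nr;

    	while (lo < hi) {	/* binary search for the insertion point */
    		size_t mid = lo + (hi - lo) / 2;

    		if (strcmp(paths[mid], path) < 0)
    			lo = mid + 1;
    		else
    			hi = mid;
    	}
    	for (; lo < nr; lo++) {
    		if (strncmp(paths[lo], path, len))
    			break;		/* no longer shares the prefix */
    		if (paths[lo][len] == '/')
    			return 1;	/* some entry lives under path/ */
    		if (paths[lo][len] > '/')
    			break;
    	}
    	return 0;
    }

    int main(void)
    {
    	const char *idx[] = { "Makefile", "src/dir.c", "src/dir.h", "t/t3011.sh" };

    	printf("%d\n", looks_like_index_dir(idx, 4, "src")); /* 1 */
    	printf("%d\n", looks_like_index_dir(idx, 4, "lib")); /* 0 */
    	return 0;
    }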
|
|
|
|
|
2023-10-09 21:58:55 +00:00
|
|
|
unsigned char get_dtype(struct dirent *e, struct strbuf *path,
|
|
|
|
int follow_symlink)
|
2023-10-09 21:58:54 +00:00
|
|
|
{
|
|
|
|
struct stat st;
|
|
|
|
unsigned char dtype = DTYPE(e);
|
|
|
|
size_t base_path_len;
|
|
|
|
|
2023-10-09 21:58:55 +00:00
|
|
|
if (dtype != DT_UNKNOWN && !(follow_symlink && dtype == DT_LNK))
|
2023-10-09 21:58:54 +00:00
|
|
|
return dtype;
|
|
|
|
|
2023-10-09 21:58:55 +00:00
|
|
|
/*
|
|
|
|
* d_type unknown or unfollowed symlink, try to fall back on [l]stat
|
|
|
|
* results. If [l]stat fails, explicitly set DT_UNKNOWN.
|
|
|
|
*/
|
2023-10-09 21:58:54 +00:00
|
|
|
base_path_len = path->len;
|
|
|
|
strbuf_addstr(path, e->d_name);
|
2023-10-09 21:58:55 +00:00
|
|
|
if ((follow_symlink && stat(path->buf, &st)) ||
|
|
|
|
(!follow_symlink && lstat(path->buf, &st)))
|
2023-10-09 21:58:54 +00:00
|
|
|
goto cleanup;
|
|
|
|
|
|
|
|
/* determine d_type from st_mode */
|
|
|
|
if (S_ISREG(st.st_mode))
|
|
|
|
dtype = DT_REG;
|
|
|
|
else if (S_ISDIR(st.st_mode))
|
|
|
|
dtype = DT_DIR;
|
|
|
|
else if (S_ISLNK(st.st_mode))
|
|
|
|
dtype = DT_LNK;
|
|
|
|
|
|
|
|
cleanup:
|
|
|
|
strbuf_setlen(path, base_path_len);
|
|
|
|
return dtype;
|
|
|
|
}
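A minimal standalone version of the same fallback might look like the
sketch below. It assumes a platform where struct dirent has a d_type
field (as on Linux); entry_type() is a made-up helper, not the dir.c
function, and it behaves like get_dtype() with follow_symlink disabled.

    #include <dirent.h>
    #include <stdio.h>
    #include <sys/stat.h>

    static unsigned char entry_type(const char *dirpath, const struct dirent *e)
    {
    	char buf[4096];
    	struct stat st;

    	if (e->d_type != DT_UNKNOWN)
    		return e->d_type;	/* the filesystem filled it in for us */
    	/* d_type unknown: fall back on lstat() of the full path */
    	snprintf(buf, sizeof(buf), "%s/%s", dirpath, e->d_name);
    	if (lstat(buf, &st))
    		return DT_UNKNOWN;
    	if (S_ISREG(st.st_mode))
    		return DT_REG;
    	if (S_ISDIR(st.st_mode))
    		return DT_DIR;
    	if (S_ISLNK(st.st_mode))
    		return DT_LNK;
    	return DT_UNKNOWN;
    }

    int main(void)
    {
    	DIR *d = opendir(".");
    	struct dirent *e;

    	if (!d)
    		return 1;
    	while ((e = readdir(d)))
    		printf("%3d %s\n", entry_type(".", e), e->d_name);
    	closedir(d);
    	return 0;
    }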
|
|
|
|
|
2020-01-16 20:21:55 +00:00
|
|
|
static int resolve_dtype(int dtype, struct index_state *istate,
|
|
|
|
const char *path, int len)
|
Fix directory scanner to correctly ignore files without d_type
On Fri, 19 Oct 2007, Todd T. Fries wrote:
> If DT_UNKNOWN exists, then we have to do a stat() of some form to
> find out the right type.
That happened in the case of a pathname that was ignored, and we did
not ask for "dir->show_ignored". That test used to be *together*
with the "DTYPE(de) != DT_DIR", but splitting the two tests up
means that we can do that (common) test before we even bother to
calculate the real dtype.
Of course, that optimization only matters for systems that don't
have, or don't fill in DTYPE properly.
I also clarified the real relationship between "exclude" and
"dir->show_ignored". It used to do
if (exclude != dir->show_ignored) {
..
which wasn't exactly obvious, because it triggers for two different
cases:
- the path is marked excluded, but we are not interested in ignored
files: ignore it
- the path is *not* excluded, but we *are* interested in ignored
files: ignore it unless it's a directory, in which case we might
have ignored files inside the directory and need to recurse
into it.
so this splits them into those two cases, since the first case
doesn't even care about the type.
I also made the DT_UNKNOWN case a separate helper function,
and added some commentary to the cases.
Linus
Signed-off-by: Shawn O. Pearce <spearce@spearce.org>
2007-10-19 17:59:22 +00:00
|
|
|
{
|
|
|
|
struct stat st;
|
|
|
|
|
|
|
|
if (dtype != DT_UNKNOWN)
|
|
|
|
return dtype;
|
2017-05-05 19:53:24 +00:00
|
|
|
dtype = get_index_dtype(istate, path, len);
|
2009-07-09 20:14:28 +00:00
|
|
|
if (dtype != DT_UNKNOWN)
|
|
|
|
return dtype;
|
|
|
|
if (lstat(path, &st))
|
Fix directory scanner to correctly ignore files without d_type
On Fri, 19 Oct 2007, Todd T. Fries wrote:
> If DT_UNKNOWN exists, then we have to do a stat() of some form to
> find out the right type.
That happened in the case of a pathname that was ignored, and we did
not ask for "dir->show_ignored". That test used to be *together*
with the "DTYPE(de) != DT_DIR", but splitting the two tests up
means that we can do that (common) test before we even bother to
calculate the real dtype.
Of course, that optimization only matters for systems that don't
have, or don't fill in DTYPE properly.
I also clarified the real relationship between "exclude" and
"dir->show_ignored". It used to do
if (exclude != dir->show_ignored) {
..
which wasn't exactly obvious, because it triggers for two different
cases:
- the path is marked excluded, but we are not interested in ignored
files: ignore it
- the path is *not* excluded, but we *are* interested in ignored
files: ignore it unless it's a directory, in which case we might
have ignored files inside the directory and need to recurse
into it.
so this splits them into those two cases, since the first case
doesn't even care about the type.
I also made the DT_UNKNOWN case a separate helper function,
and added some commentary to the cases.
Linus
Signed-off-by: Shawn O. Pearce <spearce@spearce.org>
2007-10-19 17:59:22 +00:00
|
|
|
return dtype;
|
|
|
|
if (S_ISREG(st.st_mode))
|
|
|
|
return DT_REG;
|
|
|
|
if (S_ISDIR(st.st_mode))
|
|
|
|
return DT_DIR;
|
|
|
|
if (S_ISLNK(st.st_mode))
|
|
|
|
return DT_LNK;
|
|
|
|
return dtype;
|
|
|
|
}
|
|
|
|
|
untracked cache: record/validate dir mtime and reuse cached output
The main readdir loop in read_directory_recursive() is replaced with a
new one that checks if the cached results of a directory are still valid.
If a file is added or removed from the index, the containing directory
is invalidated (but not its subdirs). If directory's mtime is changed,
the same happens. If a .gitignore is updated, the containing directory
and all subdirs are invalidated recursively. If dir_struct#flags or
other conditions change, the cache is ignored.
If a directory is invalidated, we opendir/readdir/closedir and run the
exclude machinery on that directory listing as usual. If untracked
cache is also enabled, we'll update the cache along the way. If a
directory is validated, we simply pull the untracked listing out from
the cache. The cache also records the list of direct subdirs that we
have to recurse in. Fully excluded directories are seen as "untracked
files".
In the best case when no dirs are invalidated, read_directory()
becomes a series of
stat(dir), open(.gitignore), fstat(), read(), close() and optionally
hash_sha1_file()
For comparison, standard read_directory() is a sequence of
opendir(), readdir(), open(.gitignore), fstat(), read(), close(), the
expensive last_exclude_matching() and closedir().
We already try not to open(.gitignore) if we know it does not exist,
so open/fstat/read/close sequence does not apply to every
directory. The sequence could be reduced further, as noted in
prep_exclude() in another patch. So in theory, the entire best-case
read_directory sequence could be reduced to a series of stat() and
nothing else.
This is not a silver bullet approach. When you compile a C file, for
example, the old .o file is removed and a new one with the same name
created, effectively invalidating the containing directory's cache
(but not its subdirectories). If your build process touches every
directory, this cache adds extra overhead for nothing, so it's a good
idea to separate generated files from tracked files. Editors may use
the same strategy for saving files. And of course you're out of luck
running your repo on an unsupported filesystem and/or operating system.
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:29 +00:00
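The mtime-based validation described above can be sketched in isolation
like this. It is a toy, not the untracked-cache code itself: struct
dir_cache_entry and both helpers are made-up names, and it assumes a
POSIX.1-2008 struct stat with st_mtim. The cached listing for a
directory is reused only while the directory's stat data still matches
what was recorded.

    #include <stdio.h>
    #include <sys/stat.h>

    struct dir_cache_entry {
    	struct timespec mtime;
    	off_t size;
    	ino_t ino;
    	int valid;
    };

    static void cache_record(const char *path, struct dir_cache_entry *ce)
    {
    	struct stat st;

    	if (stat(path, &st)) {
    		ce->valid = 0;
    		return;
    	}
    	ce->mtime = st.st_mtim;	/* changes when entries are added/removed */
    	ce->size = st.st_size;
    	ce->ino = st.st_ino;
    	ce->valid = 1;
    }

    static int cache_still_valid(const char *path, const struct dir_cache_entry *ce)
    {
    	struct stat st;

    	if (!ce->valid || stat(path, &st))
    		return 0;
    	return st.st_mtim.tv_sec == ce->mtime.tv_sec &&
    	       st.st_mtim.tv_nsec == ce->mtime.tv_nsec &&
    	       st.st_size == ce->size &&
    	       st.st_ino == ce->ino;
    }

    int main(void)
    {
    	struct dir_cache_entry ce = { 0 };

    	cache_record(".", &ce);
    	printf("valid now: %d\n", cache_still_valid(".", &ce));
    	/* creating or deleting an entry in "." changes its mtime,
    	   so the next cache_still_valid() call would return 0 */
    	return 0;
    }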
|
|
|
static enum path_treatment treat_path_fast(struct dir_struct *dir,
|
|
|
|
struct cached_dir *cdir,
|
2017-05-05 19:53:32 +00:00
|
|
|
struct index_state *istate,
|
untracked cache: record/validate dir mtime and reuse cached output
The main readdir loop in read_directory_recursive() is replaced with a
new one that checks if the cached results of a directory are still valid.
If a file is added or removed from the index, the containing directory
is invalidated (but not its subdirs). If directory's mtime is changed,
the same happens. If a .gitignore is updated, the containing directory
and all subdirs are invalidated recursively. If dir_struct#flags or
other conditions change, the cache is ignored.
If a directory is invalidated, we opendir/readdir/closedir and run the
exclude machinery on that directory listing as usual. If untracked
cache is also enabled, we'll update the cache along the way. If a
directory is validated, we simply pull the untracked listing out from
the cache. The cache also records the list of direct subdirs that we
have to recurse in. Fully excluded directories are seen as "untracked
files".
In the best case when no dirs are invalidated, read_directory()
becomes a series of
stat(dir), open(.gitignore), fstat(), read(), close() and optionally
hash_sha1_file()
For comparison, standard read_directory() is a sequence of
opendir(), readdir(), open(.gitignore), fstat(), read(), close(), the
expensive last_exclude_matching() and closedir().
We already try not to open(.gitignore) if we know it does not exist,
so open/fstat/read/close sequence does not apply to every
directory. The sequence could be reduced further, as noted in
prep_exclude() in another patch. So in theory, the entire best-case
read_directory sequence could be reduced to a series of stat() and
nothing else.
This is not a silver bullet approach. When you compile a C file, for
example, the old .o file is removed and a new one with the same name
created, effectively invalidating the containing directory's cache
(but not its subdirectories). If your build process touches every
directory, this cache adds extra overhead for nothing, so it's a good
idea to separate generated files from tracked files. Editors may use
the same strategy for saving files. And of course you're out of luck
running your repo on an unsupported filesystem and/or operating system.
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:29 +00:00
|
|
|
struct strbuf *path,
|
|
|
|
int baselen,
|
2017-01-04 18:03:57 +00:00
|
|
|
const struct pathspec *pathspec)
|
untracked cache: record/validate dir mtime and reuse cached output
The main readdir loop in read_directory_recursive() is replaced with a
new one that checks if the cached results of a directory are still valid.
If a file is added or removed from the index, the containing directory
is invalidated (but not its subdirs). If directory's mtime is changed,
the same happens. If a .gitignore is updated, the containing directory
and all subdirs are invalidated recursively. If dir_struct#flags or
other conditions change, the cache is ignored.
If a directory is invalidated, we opendir/readdir/closedir and run the
exclude machinery on that directory listing as usual. If untracked
cache is also enabled, we'll update the cache along the way. If a
directory is validated, we simply pull the untracked listing out from
the cache. The cache also records the list of direct subdirs that we
have to recurse in. Fully excluded directories are seen as "untracked
files".
In the best case when no dirs are invalidated, read_directory()
becomes a series of
stat(dir), open(.gitignore), fstat(), read(), close() and optionally
hash_sha1_file()
For comparison, standard read_directory() is a sequence of
opendir(), readdir(), open(.gitignore), fstat(), read(), close(), the
expensive last_exclude_matching() and closedir().
We already try not to open(.gitignore) if we know it does not exist,
so open/fstat/read/close sequence does not apply to every
directory. The sequence could be reduced further, as noted in
prep_exclude() in another patch. So in theory, the entire best-case
read_directory sequence could be reduced to a series of stat() and
nothing else.
This is not a silver bullet approach. When you compile a C file, for
example, the old .o file is removed and a new one with the same name
created, effectively invalidating the containing directory's cache
(but not its subdirectories). If your build process touches every
directory, this cache adds extra overhead for nothing, so it's a good
idea to separate generated files from tracked files. Editors may use
the same strategy for saving files. And of course you're out of luck
running your repo on an unsupported filesystem and/or operating system.
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:29 +00:00
|
|
|
{
|
dir: replace exponential algorithm with a linear one
dir's read_directory_recursive() naturally operates recursively in order
to walk the directory tree. Treating of directories is sometimes weird
because there are so many different permutations about how to handle
directories. Some examples:
* 'git ls-files -o --directory' only needs to know that a directory
itself is untracked; it doesn't need to recurse into it to see what
is underneath.
* 'git status' needs to recurse into an untracked directory, but only
to determine whether or not it is empty. If there are no files
underneath, the directory itself will be omitted from the output.
If it is not empty, only the directory will be listed.
* 'git status --ignored' needs to recurse into untracked directories
and report all the ignored entries and then report the directory as
untracked -- UNLESS all the entries under the directory are
ignored, in which case we don't print any of the entries under the
directory and just report the directory itself as ignored. (Note
that although this forces us to walk all untracked files underneath
the directory as well, we strip them from the output, except for
users like 'git clean' who also set DIR_KEEP_TRACKED_CONTENTS.)
* For 'git clean', we may need to recurse into a directory that
doesn't match any specified pathspecs, if it's possible that there
is an entry underneath the directory that can match one of the
pathspecs. In such a case, we need to be careful to omit the
directory itself from the list of paths (see commit 404ebceda01c
("dir: also check directories for matching pathspecs", 2019-09-17))
Part of the tension noted above is that the treatment of a directory can
change based on the files within it, and based on the various settings
in dir->flags. Trying to keep this in mind while reading over the code,
it is easy to think in terms of "treat_directory() tells us what to do
with a directory, and read_directory_recursive() is the thing that
recurses". Since we need to look into a directory to know how to treat
it, though, it is quite easy to decide to (also) recurse into the
directory from treat_directory() by adding a read_directory_recursive()
call. Adding such a call is actually fine, IF we make sure that
read_directory_recursive() does not also recurse into that same
directory.
Unfortunately, commit df5bcdf83aeb ("dir: recurse into untracked dirs
for ignored files", 2017-05-18), added exactly such a case to the code,
meaning we'd have two calls to read_directory_recursive() for an
untracked directory. So, if we had a file named
one/two/three/four/five/somefile.txt
and nothing in one/ was tracked, then 'git status --ignored' would
call read_directory_recursive() twice on the directory 'one/', and
each of those would call read_directory_recursive() twice on the
directory 'one/two/', and so on until read_directory_recursive() was
called 2^5 times for 'one/two/three/four/five/'.
Avoid calling read_directory_recursive() twice per level by moving a
lot of the special logic into treat_directory().
Since dir.c is somewhat complex, extra cruft built up around this over
time. While trying to unravel it, I noticed several instances where the
first call to read_directory_recursive() would return e.g.
path_untracked for some directory and a later one would return e.g.
path_none, despite the fact that the directory clearly should have been
considered untracked. The code happened to work due to the side-effect
from the first invocation of adding untracked entries to dir->entries;
this allowed it to get the correct output despite the supposed override
in return value by the later call.
I am somewhat concerned that there are still bugs and maybe even
testcases with the wrong expectation. I have tried to carefully
document treat_directory() since it becomes more complex after this
change (though much of this complexity came from elsewhere that probably
deserved better comments to begin with). However, much of my work felt
more like a game of whack-a-mole while attempting to make the code match
the existing regression tests than an attempt to create an
implementation that matched some clear design. That seems wrong to me,
but the rules of existing behavior had so many special cases that I had
a hard time coming up with some overarching rules about what correct
behavior is for all cases, forcing me to hope that the regression tests
are correct and sufficient. Such a hope seems likely to be ill-founded,
given my experience with dir.c-related testcases in the last few months:
Examples where the documentation was hard to parse or even just wrong:
* 3aca58045f4f (git-clean.txt: do not claim we will delete files with
-n/--dry-run, 2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
Examples where testcases were declared wrong and changed:
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* a2b13367fe55 (Revert "dir.c: make 'git-status --ignored' work within
leading directories", 2019-12-10)
Examples where testcases were clearly inadequate:
* 502c386ff944 (t7300-clean: demonstrate deleting nested repo with an
ignored file breakage, 2019-08-25)
* 7541cc530239 (t7300: add testcases showing failure to clean specified
pathspecs, 2019-09-17)
* a5e916c7453b (dir: fix off-by-one error in match_pathspec_item,
2019-09-17)
* 404ebceda01c (dir: also check directories for matching pathspecs,
2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* 452efd11fbf6 (t3011: demonstrate directory traversal failures,
2019-12-10)
* b9670c1f5e6b (dir: fix checks on common prefix directory, 2019-12-19)
Examples where "correct behavior" was unclear to everyone:
https://lore.kernel.org/git/20190905154735.29784-1-newren@gmail.com/
Other commits of note:
* 902b90cf42bc (clean: fix theoretical path corruption, 2019-09-17)
However, on the positive side, it does make the code much faster. For
the following simple shell loop in an empty repository:
for depth in $(seq 10 25)
do
dirs=$(for i in $(seq 1 $depth) ; do printf 'dir/' ; done)
rm -rf dir
mkdir -p $dirs
>$dirs/untracked-file
/usr/bin/time --format="$depth: %e" git status --ignored >/dev/null
done
I saw the following timings, in seconds (note that the numbers are a
little noisy from run-to-run, but the trend is very clear with every
run):
10: 0.03
11: 0.05
12: 0.08
13: 0.19
14: 0.29
15: 0.50
16: 1.05
17: 2.11
18: 4.11
19: 8.60
20: 17.55
21: 33.87
22: 68.71
23: 140.05
24: 274.45
25: 551.15
For the above run, using strace I can look for the number of untracked
directories opened and can verify that it matches the expected
2^($depth+1)-2 (the sum of 2^1 + 2^2 + 2^3 + ... + 2^$depth).
After this fix, with strace I can verify that the number of untracked
directories that are opened drops to just $depth, and the timings all
drop to 0.00. In fact, it isn't until a depth of 190 nested directories
that it sometimes starts reporting a time of 0.01 seconds and doesn't
consistently report 0.01 seconds until there are 240 nested directories.
The previous code would have taken
17.55 * 2^220 / (60*60*24*365) = 9.4 * 10^59 YEARS
to have completed the 240 nested directories case. It's not often
that you get to speed something up by a factor of 3*10^69.
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-01 04:17:42 +00:00
|
|
|
/*
|
|
|
|
* WARNING: From this function, you can return path_recurse or you
|
|
|
|
* can call read_directory_recursive() (or neither), but
|
|
|
|
* you CAN'T DO BOTH.
|
|
|
|
*/
|
untracked cache: record/validate dir mtime and reuse cached output
The main readdir loop in read_directory_recursive() is replaced with a
new one that checks if the cached results of a directory are still valid.
If a file is added or removed from the index, the containing directory
is invalidated (but not its subdirs). If directory's mtime is changed,
the same happens. If a .gitignore is updated, the containing directory
and all subdirs are invalidated recursively. If dir_struct#flags or
other conditions change, the cache is ignored.
If a directory is invalidated, we opendir/readdir/closedir and run the
exclude machinery on that directory listing as usual. If untracked
cache is also enabled, we'll update the cache along the way. If a
directory is validated, we simply pull the untracked listing out from
the cache. The cache also records the list of direct subdirs that we
have to recurse in. Fully excluded directories are seen as "untracked
files".
In the best case when no dirs are invalidated, read_directory()
becomes a series of
stat(dir), open(.gitignore), fstat(), read(), close() and optionally
hash_sha1_file()
For comparison, standard read_directory() is a sequence of
opendir(), readdir(), open(.gitignore), fstat(), read(), close(), the
expensive last_exclude_matching() and closedir().
We already try not to open(.gitignore) if we know it does not exist,
so open/fstat/read/close sequence does not apply to every
directory. The sequence could be reduced further, as noted in
prep_exclude() in another patch. So in theory, the entire best-case
read_directory sequence could be reduced to a series of stat() and
nothing else.
This is not a silver bullet approach. When you compile a C file, for
example, the old .o file is removed and a new one with the same name
created, effectively invalidating the containing directory's cache
(but not its subdirectories). If your build process touches every
directory, this cache adds extra overhead for nothing, so it's a good
idea to separate generated files from tracked files. Editors may use
the same strategy for saving files. And of course you're out of luck
running your repo on an unsupported filesystem and/or operating system.
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:29 +00:00
|
|
|
strbuf_setlen(path, baselen);
|
|
|
|
if (!cdir->ucd) {
|
|
|
|
strbuf_addstr(path, cdir->file);
|
|
|
|
return path_untracked;
|
|
|
|
}
|
|
|
|
strbuf_addstr(path, cdir->ucd->name);
|
|
|
|
/* treat_one_path() does this before it calls treat_directory() */
|
use strbuf_complete to conditionally append slash
When working with paths in strbufs, we frequently want to
ensure that a directory contains a trailing slash before
appending to it. We can shorten this code (and make the
intent more obvious) by calling strbuf_complete.
Most of these cases are trivially identical conversions, but
there are two things to note:
- in a few cases we did not check that the strbuf is
non-empty (which would lead to an out-of-bounds memory
access). These were generally not triggerable in
practice, either from earlier assertions, or typically
because we would have just fed the strbuf to opendir(),
which would choke on an empty path.
- in a few cases we indexed the buffer with "original_len"
or similar, rather than the current sb->len, and it is
not immediately obvious from the diff that they are the
same. In all of these cases, I manually verified that
the strbuf does not change between the assignment and
the strbuf_complete call.
This does not convert cases which look like:
if (sb->len && !is_dir_sep(sb->buf[sb->len - 1]))
strbuf_addch(sb, '/');
as those are obviously semantically different. Some of these
cases arguably should be doing that, but that is out of
scope for this change, which aims purely for cleanup with no
behavior change (and at least it will make such sites easier
to find and examine in the future, as we can grep for
strbuf_complete).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-09-24 21:08:35 +00:00
|
|
|
strbuf_complete(path, '/');
|
untracked cache: record/validate dir mtime and reuse cached output
The main readdir loop in read_directory_recursive() is replaced with a
new one that checks if the cached results of a directory are still valid.
If a file is added or removed from the index, the containing directory
is invalidated (but not its subdirs). If directory's mtime is changed,
the same happens. If a .gitignore is updated, the containing directory
and all subdirs are invalidated recursively. If dir_struct#flags or
other conditions change, the cache is ignored.
If a directory is invalidated, we opendir/readdir/closedir and run the
exclude machinery on that directory listing as usual. If untracked
cache is also enabled, we'll update the cache along the way. If a
directory is validated, we simply pull the untracked listing out from
the cache. The cache also records the list of direct subdirs that we
have to recurse in. Fully excluded directories are seen as "untracked
files".
In the best case when no dirs are invalidated, read_directory()
becomes a series of
stat(dir), open(.gitignore), fstat(), read(), close() and optionally
hash_sha1_file()
For comparison, standard read_directory() is a sequence of
opendir(), readdir(), open(.gitignore), fstat(), read(), close(), the
expensive last_exclude_matching() and closedir().
We already try not to open(.gitignore) if we know it does not exist,
so open/fstat/read/close sequence does not apply to every
directory. The sequence could be reduced further, as noted in
prep_exclude() in another patch. So in theory, the entire best-case
read_directory sequence could be reduced to a series of stat() and
nothing else.
This is not a silver bullet approach. When you compile a C file, for
example, the old .o file is removed and a new one with the same name
created, effectively invalidating the containing directory's cache
(but not its subdirectories). If your build process touches every
directory, this cache adds extra overhead for nothing, so it's a good
idea to separate generated files from tracked files. Editors may use
the same strategy for saving files. And of course you're out of luck
running your repo on an unsupported filesystem and/or operating system.
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:29 +00:00
|
|
|
if (cdir->ucd->check_only)
|
|
|
|
/*
|
|
|
|
* check_only is set as a result of treat_directory() getting
|
|
|
|
* to its bottom. Verify again the same set of directories
|
|
|
|
* with check_only set.
|
|
|
|
*/
|
2017-05-05 19:53:32 +00:00
|
|
|
return read_directory_recursive(dir, istate, path->buf, path->len,
|
2017-09-18 17:24:33 +00:00
|
|
|
cdir->ucd, 1, 0, pathspec);
|
untracked cache: record/validate dir mtime and reuse cached output
The main readdir loop in read_directory_recursive() is replaced with a
new one that checks if the cached results of a directory are still valid.
If a file is added or removed from the index, the containing directory
is invalidated (but not its subdirs). If directory's mtime is changed,
the same happens. If a .gitignore is updated, the containing directory
and all subdirs are invalidated recursively. If dir_struct#flags or
other conditions change, the cache is ignored.
If a directory is invalidated, we opendir/readdir/closedir and run the
exclude machinery on that directory listing as usual. If untracked
cache is also enabled, we'll update the cache along the way. If a
directory is validated, we simply pull the untracked listing out from
the cache. The cache also records the list of direct subdirs that we
have to recurse in. Fully excluded directories are seen as "untracked
files".
In the best case when no dirs are invalidated, read_directory()
becomes a series of
stat(dir), open(.gitignore), fstat(), read(), close() and optionally
hash_sha1_file()
For comparison, standard read_directory() is a sequence of
opendir(), readdir(), open(.gitignore), fstat(), read(), close(), the
expensive last_exclude_matching() and closedir().
We already try not to open(.gitignore) if we know it does not exist,
so open/fstat/read/close sequence does not apply to every
directory. The sequence could be reduced further, as noted in
prep_exclude() in another patch. So in theory, the entire best-case
read_directory sequence could be reduced to a series of stat() and
nothing else.
This is not a silver bullet approach. When you compile a C file, for
example, the old .o file is removed and a new one with the same name
created, effectively invalidating the containing directory's cache
(but not its subdirectories). If your build process touches every
directory, this cache adds extra overhead for nothing, so it's a good
idea to separate generated files from tracked files. Editors may use
the same strategy for saving files. And of course you're out of luck
running your repo on an unsupported filesystem and/or operating system.
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:29 +00:00
|
|
|
/*
|
|
|
|
* We get path_recurse in the first run when
|
|
|
|
* directory_exists_in_index() returns index_nonexistent. We
|
|
|
|
* are sure that new changes in the index do not impact the
|
|
|
|
* outcome. Return now.
|
|
|
|
*/
|
|
|
|
return path_recurse;
|
|
|
|
}
|
|
|
|
|
2010-01-09 04:56:16 +00:00
|
|
|
static enum path_treatment treat_path(struct dir_struct *dir,
|
untracked cache: record .gitignore information and dir hierarchy
The idea is that if we can capture all input and (non-recursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.
The requirement for this to work is stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of input of r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on stat info of the directory in question, all .gitignore leading to
it and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.
Two hash_sha1_file() are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified,
otherwise their SHA-1 in index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them if their content is different from index, but I think
.gitignore files are rarely modified, so not worth extra cache data
(and a hashing penalty in read-cache.c:verify_hdr(), as we will be storing
this as an index extension).
The implication is, if you change .gitignore, you better add it to the
index soon or you lose all the benefit of untracked cache because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for .gitignore at root.
This cached output is about untracked files only, not ignored files
because the number of tracked files is usually small, so small cache
overhead, while the number of ignored files could go really high
(e.g. *.o files mixing with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:25 +00:00
|
|
|
struct untracked_cache_dir *untracked,
|
2015-03-08 10:12:28 +00:00
|
|
|
struct cached_dir *cdir,
|
2017-05-05 19:53:32 +00:00
|
|
|
struct index_state *istate,
|
2012-05-01 11:25:24 +00:00
|
|
|
struct strbuf *path,
|
2010-01-09 04:56:16 +00:00
|
|
|
int baselen,
|
2017-01-04 18:03:57 +00:00
|
|
|
const struct pathspec *pathspec)
|
2010-01-09 04:56:16 +00:00
|
|
|
{
|
2020-04-01 04:17:40 +00:00
|
|
|
int has_path_in_index, dtype, excluded;
|
ls-files -k: a directory only can be killed if the index has a non-directory
"ls-files -o" and "ls-files -k" both traverse the working tree down
to find either all untracked paths or those that will be "killed"
(removed from the working tree to make room) when the paths recorded
in the index are checked out. It is necessary to traverse the
working tree fully when enumerating all the "other" paths, but when
we are only interested in "killed" paths, we can take advantage of
the fact that paths that do not overlap with entries in the index
can never be killed.
The treat_one_path() helper function, which is called during the
recursive traversal, is the ideal place to implement an
optimization.
When we are looking at a directory P in the working tree, there are
three cases:
(1) P exists in the index. Everything inside the directory P in
the working tree needs to go when P is checked out from the
index.
(2) P does not exist in the index, but there is P/Q in the index.
We know P will stay a directory when we check out the contents
of the index, but we do not know yet if there is a directory
P/Q in the working tree to be killed, so we need to recurse.
(3) P does not exist in the index, and there is no P/Q in the index
to require P to be a directory, either. Only in this case, we
know that everything inside P will not be killed without
recursing.
Note that this helper is called by treat_leading_path() that decides
if we need to traverse only subdirectories of a single common
leading directory, which is essential for this optimization to be
correct. This caller checks each level of the leading path
component from shallower directory to deeper ones, and that is what
allows us to only check if the path appears in the index. If the
call to treat_one_path() weren't there, given a path P/Q/R, the real
traversal may start from directory P/Q/R, even when the index
records P as a regular file, and we would end up having to check if
any leading subpath in P/Q/R, e.g. P, appears in the index.
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-08-15 19:13:46 +00:00
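A toy version of the three-way classification described above (a plain
string array stands in for the index; classify() and the enum names are
made up for illustration) might look like:

    #include <stdio.h>
    #include <string.h>

    enum p_case { P_IN_INDEX, P_PREFIX_IN_INDEX, P_UNRELATED };

    static enum p_case classify(const char *p, const char *const *index_paths,
    			    size_t nr)
    {
    	size_t plen = strlen(p), i;

    	for (i = 0; i < nr; i++) {
    		if (!strcmp(index_paths[i], p))
    			return P_IN_INDEX;	/* case 1: everything inside P goes
    						   when P is checked out */
    		if (!strncmp(index_paths[i], p, plen) &&
    		    index_paths[i][plen] == '/')
    			return P_PREFIX_IN_INDEX;	/* case 2: some P/Q is in the
    							   index, recurse to find kills */
    	}
    	return P_UNRELATED;	/* case 3: nothing under P can be killed */
    }

    int main(void)
    {
    	const char *idx[] = { "Makefile", "src/dir.c" };

    	printf("%d\n", classify("Makefile", idx, 2)); /* 0: P_IN_INDEX */
    	printf("%d\n", classify("src", idx, 2));      /* 1: P_PREFIX_IN_INDEX */
    	printf("%d\n", classify("t", idx, 2));        /* 2: P_UNRELATED */
    	return 0;
    }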
|
|
|
|
2020-01-16 20:21:55 +00:00
|
|
|
if (!cdir->d_name)
|
2020-09-30 12:35:00 +00:00
|
|
|
return treat_path_fast(dir, cdir, istate, path,
|
2017-01-04 18:03:57 +00:00
|
|
|
baselen, pathspec);
|
2020-01-16 20:21:55 +00:00
|
|
|
if (is_dot_or_dotdot(cdir->d_name) || !fspathcmp(cdir->d_name, ".git"))
|
dir.c: git-status --ignored: don't scan the work tree three times
'git-status --ignored' recursively scans directories up to three times:
1. To collect untracked files.
2. To collect ignored files.
3. When collecting ignored files, to check that an untracked directory
that potentially contains ignored files doesn't also contain untracked
files (i.e. isn't already listed as untracked).
Let's get rid of case 3 first.
Currently, read_directory_recursive returns a boolean whether a directory
contains the requested files or not (actually, it returns the number of
files, but no caller actually needs that), and DIR_SHOW_IGNORED specifies
what we're looking for.
To be able to test for both untracked and ignored files in a single scan,
we need to return a bit more info, and the result must be independent of
the DIR_SHOW_IGNORED flag.
Reuse the path_treatment enum as return value of read_directory_recursive.
Split path_handled in two separate values path_excluded and path_untracked
that don't change their meaning with the DIR_SHOW_IGNORED flag. We don't
need an extra value path_untracked_and_excluded, as directories with both
untracked and ignored files should be listed as untracked.
Rename path_ignored to path_none for clarity (i.e. "don't treat that path"
in contrast to "the path is ignored and should be treated according to
DIR_SHOW_IGNORED").
Replace enum directory_treatment with path_treatment. That's just another
enum with the same meaning, no need to translate back and forth.
In treat_directory, get rid of the extra read_directory_recursive call and
all the DIR_SHOW_IGNORED-specific code.
In read_directory_recursive, decide whether to dir_add_name path_excluded
or path_untracked paths based on the DIR_SHOW_IGNORED flag.
The return value of read_directory_recursive is the maximum path_treatment
of all files and sub-directories. In the check_only case, abort when we've
reached the most significant value (path_untracked).
Signed-off-by: Karsten Blees <blees@dcon.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-04-15 19:14:22 +00:00
|
|
|
return path_none;
|
2012-05-01 11:25:24 +00:00
|
|
|
strbuf_setlen(path, baselen);
|
2020-01-16 20:21:55 +00:00
|
|
|
strbuf_addstr(path, cdir->d_name);
|
2017-01-04 18:03:57 +00:00
|
|
|
if (simplify_away(path->buf, path->len, pathspec))
|
dir.c: git-status --ignored: don't scan the work tree three times
'git-status --ignored' recursively scans directories up to three times:
1. To collect untracked files.
2. To collect ignored files.
3. When collecting ignored files, to check that an untracked directory
that potentially contains ignored files doesn't also contain untracked
files (i.e. isn't already listed as untracked).
Let's get rid of case 3 first.
Currently, read_directory_recursive returns a boolean whether a directory
contains the requested files or not (actually, it returns the number of
files, but no caller actually needs that), and DIR_SHOW_IGNORED specifies
what we're looking for.
To be able to test for both untracked and ignored files in a single scan,
we need to return a bit more info, and the result must be independent of
the DIR_SHOW_IGNORED flag.
Reuse the path_treatment enum as return value of read_directory_recursive.
Split path_handled in two separate values path_excluded and path_untracked
that don't change their meaning with the DIR_SHOW_IGNORED flag. We don't
need an extra value path_untracked_and_excluded, as directories with both
untracked and ignored files should be listed as untracked.
Rename path_ignored to path_none for clarity (i.e. "don't treat that path"
in contrast to "the path is ignored and should be treated according to
DIR_SHOW_IGNORED").
Replace enum directory_treatment with path_treatment. That's just another
enum with the same meaning, no need to translate back and forth.
In treat_directory, get rid of the extra read_directory_recursive call and
all the DIR_SHOW_IGNORED-specific code.
In read_directory_recursive, decide whether to dir_add_name path_excluded
or path_untracked paths based on the DIR_SHOW_IGNORED flag.
The return value of read_directory_recursive is the maximum path_treatment
of all files and sub-directories. In the check_only case, abort when we've
reached the most significant value (path_untracked).
Signed-off-by: Karsten Blees <blees@dcon.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-04-15 19:14:22 +00:00
|
|
|
return path_none;
|
2010-01-09 04:56:16 +00:00
|
|
|
|
2020-04-01 04:17:38 +00:00
|
|
|
dtype = resolve_dtype(cdir->d_type, istate, path->buf, path->len);
|
2013-04-15 19:13:35 +00:00
|
|
|
|
|
|
|
/* Always exclude indexed files */
|
2020-04-01 04:17:38 +00:00
|
|
|
has_path_in_index = !!index_file_exists(istate, path->buf, path->len,
|
|
|
|
ignore_case);
|
ls-files -k: a directory only can be killed if the index has a non-directory
"ls-files -o" and "ls-files -k" both traverse the working tree down
to find either all untracked paths or those that will be "killed"
(removed from the working tree to make room) when the paths recorded
in the index are checked out. It is necessary to traverse the
working tree fully when enumerating all the "other" paths, but when
we are only interested in "killed" paths, we can take advantage of
the fact that paths that do not overlap with entries in the index
can never be killed.
The treat_one_path() helper function, which is called during the
recursive traversal, is the ideal place to implement an
optimization.
When we are looking at a directory P in the working tree, there are
three cases:
(1) P exists in the index. Everything inside the directory P in
the working tree needs to go when P is checked out from the
index.
(2) P does not exist in the index, but there is P/Q in the index.
We know P will stay a directory when we check out the contents
of the index, but we do not know yet if there is a directory
P/Q in the working tree to be killed, so we need to recurse.
(3) P does not exist in the index, and there is no P/Q in the index
to require P to be a directory, either. Only in this case, we
know that everything inside P will not be killed without
recursing.
Note that this helper is called by treat_leading_path() that decides
if we need to traverse only subdirectories of a single common
leading directory, which is essential for this optimization to be
correct. This caller checks each level of the leading path
component from shallower directory to deeper ones, and that is what
allows us to only check if the path appears in the index. If the
call to treat_one_path() weren't there, given a path P/Q/R, the real
traversal may start from directory P/Q/R, even when the index
records P as a regular file, and we would end up having to check if
any leading subpath in P/Q/R, e.g. P, appears in the index.
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-08-15 19:13:46 +00:00
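A minimal standalone sketch of that three-way decision (hypothetical enum and function names chosen for illustration; the real check lives in treat_one_path() and consults the index directly, as the code a few lines below shows):

#include <stdio.h>

/* Illustrative sketch: the three cases for a directory P when only
 * "killed" paths are collected.  p_in_index: P itself is an index entry;
 * p_sub_in_index: some P/Q is an index entry. */
enum kill_decision { KILL_EVERYTHING, MUST_RECURSE, PRUNE_SUBTREE };

static enum kill_decision decide(int p_in_index, int p_sub_in_index)
{
        if (p_in_index)
                return KILL_EVERYTHING; /* case (1) */
        if (p_sub_in_index)
                return MUST_RECURSE;    /* case (2): some P/Q may be killed */
        return PRUNE_SUBTREE;           /* case (3): nothing under P can be killed */
}

int main(void)
{
        printf("%d %d %d\n", decide(1, 0), decide(0, 1), decide(0, 0));
        return 0;
}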
|
|
|
if (dtype != DT_DIR && has_path_in_index)
|
2013-04-15 19:14:22 +00:00
|
|
|
return path_none;
|
2013-04-15 19:13:35 +00:00
|
|
|
|
2013-08-15 19:13:46 +00:00
|
|
|
/*
|
|
|
|
* When we are looking at a directory P in the working tree,
|
|
|
|
* there are three cases:
|
|
|
|
*
|
|
|
|
* (1) P exists in the index. Everything inside the directory P in
|
|
|
|
* the working tree needs to go when P is checked out from the
|
|
|
|
* index.
|
|
|
|
*
|
|
|
|
* (2) P does not exist in the index, but there is P/Q in the index.
|
|
|
|
* We know P will stay a directory when we check out the contents
|
|
|
|
* of the index, but we do not know yet if there is a directory
|
|
|
|
* P/Q in the working tree to be killed, so we need to recurse.
|
|
|
|
*
|
|
|
|
* (3) P does not exist in the index, and there is no P/Q in the index
|
|
|
|
* to require P to be a directory, either. Only in this case, we
|
|
|
|
* know that everything inside P will not be killed without
|
|
|
|
* recursing.
|
|
|
|
*/
|
|
|
|
if ((dir->flags & DIR_COLLECT_KILLED_ONLY) &&
|
|
|
|
(dtype == DT_DIR) &&
|
2013-09-17 07:06:17 +00:00
|
|
|
!has_path_in_index &&
|
2017-05-05 19:53:32 +00:00
|
|
|
(directory_exists_in_index(istate, path->buf, path->len) == index_nonexistent))
|
2013-09-17 07:06:17 +00:00
|
|
|
return path_none;
|
2013-04-15 19:13:35 +00:00
|
|
|
|
2020-04-01 04:17:40 +00:00
|
|
|
excluded = is_excluded(dir, istate, path->buf, &dtype);
|
2010-01-09 03:14:07 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Excluded? If we don't explicitly want to show
|
|
|
|
* ignored files, ignore it
|
|
|
|
*/
|
2020-04-01 04:17:40 +00:00
|
|
|
if (excluded && !(dir->flags & (DIR_SHOW_IGNORED|DIR_SHOW_IGNORED_TOO)))
|
2013-04-15 19:14:22 +00:00
|
|
|
return path_excluded;
|
2010-01-09 03:14:07 +00:00
|
|
|
|
|
|
|
switch (dtype) {
|
|
|
|
default:
|
2013-04-15 19:14:22 +00:00
|
|
|
return path_none;
|
2010-01-09 03:14:07 +00:00
|
|
|
case DT_DIR:
|
2017-10-30 17:21:38 +00:00
|
|
|
/*
|
dir: replace exponential algorithm with a linear one
dir's read_directory_recursive() naturally operates recursively in order
to walk the directory tree. The treatment of directories is sometimes tricky
because there are so many different permutations of how to handle
them. Some examples:
* 'git ls-files -o --directory' only needs to know that a directory
itself is untracked; it doesn't need to recurse into it to see what
is underneath.
* 'git status' needs to recurse into an untracked directory, but only
to determine whether or not it is empty. If there are no files
underneath, the directory itself will be omitted from the output.
If it is not empty, only the directory will be listed.
* 'git status --ignored' needs to recurse into untracked directories
and report all the ignored entries and then report the directory as
untracked -- UNLESS all the entries under the directory are
ignored, in which case we don't print any of the entries under the
directory and just report the directory itself as ignored. (Note
that although this forces us to walk all untracked files underneath
the directory as well, we strip them from the output, except for
users like 'git clean' who also set DIR_KEEP_UNTRACKED_CONTENTS.)
* For 'git clean', we may need to recurse into a directory that
doesn't match any specified pathspecs, if it's possible that there
is an entry underneath the directory that can match one of the
pathspecs. In such a case, we need to be careful to omit the
directory itself from the list of paths (see commit 404ebceda01c
("dir: also check directories for matching pathspecs", 2019-09-17))
Part of the tension noted above is that the treatment of a directory can
change based on the files within it, and based on the various settings
in dir->flags. Trying to keep this in mind while reading over the code,
it is easy to think in terms of "treat_directory() tells us what to do
with a directory, and read_directory_recursive() is the thing that
recurses". Since we need to look into a directory to know how to treat
it, though, it is quite easy to decide to (also) recurse into the
directory from treat_directory() by adding a read_directory_recursive()
call. Adding such a call is actually fine, IF we make sure that
read_directory_recursive() does not also recurse into that same
directory.
Unfortunately, commit df5bcdf83aeb ("dir: recurse into untracked dirs
for ignored files", 2017-05-18), added exactly such a case to the code,
meaning we'd have two calls to read_directory_recursive() for an
untracked directory. So, if we had a file named
one/two/three/four/five/somefile.txt
and nothing in one/ was tracked, then 'git status --ignored' would
call read_directory_recursive() twice on the directory 'one/', and
each of those would call read_directory_recursive() twice on the
directory 'one/two/', and so on until read_directory_recursive() was
called 2^5 times for 'one/two/three/four/five/'.
Avoid calling read_directory_recursive() twice per level by moving a
lot of the special logic into treat_directory().
Since dir.c is somewhat complex, extra cruft built up around this over
time. While trying to unravel it, I noticed several instances where the
first call to read_directory_recursive() would return e.g.
path_untracked for some directory and a later one would return e.g.
path_none, despite the fact that the directory clearly should have been
considered untracked. The code happened to work due to the side-effect
from the first invocation of adding untracked entries to dir->entries;
this allowed it to get the correct output despite the supposed override
in return value by the later call.
I am somewhat concerned that there are still bugs and maybe even
testcases with the wrong expectation. I have tried to carefully
document treat_directory() since it becomes more complex after this
change (though much of this complexity came from elsewhere that probably
deserved better comments to begin with). However, much of my work felt
more like a game of whackamole while attempting to make the code match
the existing regression tests than an attempt to create an
implementation that matched some clear design. That seems wrong to me,
but the rules of existing behavior had so many special cases that I had
a hard time coming up with some overarching rules about what correct
behavior is for all cases, forcing me to hope that the regression tests
are correct and sufficient. Such a hope seems likely to be ill-founded,
given my experience with dir.c-related testcases in the last few months:
Examples where the documentation was hard to parse or even just wrong:
* 3aca58045f4f (git-clean.txt: do not claim we will delete files with
-n/--dry-run, 2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
Examples where testcases were declared wrong and changed:
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* a2b13367fe55 (Revert "dir.c: make 'git-status --ignored' work within
leading directories", 2019-12-10)
Examples where testcases were clearly inadequate:
* 502c386ff944 (t7300-clean: demonstrate deleting nested repo with an
ignored file breakage, 2019-08-25)
* 7541cc530239 (t7300: add testcases showing failure to clean specified
pathspecs, 2019-09-17)
* a5e916c7453b (dir: fix off-by-one error in match_pathspec_item,
2019-09-17)
* 404ebceda01c (dir: also check directories for matching pathspecs,
2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* 452efd11fbf6 (t3011: demonstrate directory traversal failures,
2019-12-10)
* b9670c1f5e6b (dir: fix checks on common prefix directory, 2019-12-19)
Examples where "correct behavior" was unclear to everyone:
https://lore.kernel.org/git/20190905154735.29784-1-newren@gmail.com/
Other commits of note:
* 902b90cf42bc (clean: fix theoretical path corruption, 2019-09-17)
However, on the positive side, it does make the code much faster. For
the following simple shell loop in an empty repository:
for depth in $(seq 10 25)
do
dirs=$(for i in $(seq 1 $depth) ; do printf 'dir/' ; done)
rm -rf dir
mkdir -p $dirs
>$dirs/untracked-file
/usr/bin/time --format="$depth: %e" git status --ignored >/dev/null
done
I saw the following timings, in seconds (note that the numbers are a
little noisy from run-to-run, but the trend is very clear with every
run):
10: 0.03
11: 0.05
12: 0.08
13: 0.19
14: 0.29
15: 0.50
16: 1.05
17: 2.11
18: 4.11
19: 8.60
20: 17.55
21: 33.87
22: 68.71
23: 140.05
24: 274.45
25: 551.15
For the above run, using strace I can look for the number of untracked
directories opened and can verify that it matches the expected
2^($depth+1)-2 (the sum of 2^1 + 2^2 + 2^3 + ... + 2^$depth).
After this fix, with strace I can verify that the number of untracked
directories that are opened drops to just $depth, and the timings all
drop to 0.00. In fact, it isn't until a depth of 190 nested directories
that it sometimes starts reporting a time of 0.01 seconds and doesn't
consistently report 0.01 seconds until there are 240 nested directories.
The previous code would have taken
17.55 * 2^220 / (60*60*24*365) = 9.4 * 10^59 YEARS
to have completed the 240 nested directories case. It's not often
that you get to speed something up by a factor of 3*10^69.
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-01 04:17:42 +00:00
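To make those numbers concrete, here is a small standalone program (illustrative only, not part of dir.c) that prints the expected number of untracked-directory opens per depth: 2^(depth+1)-2 with the old doubling recursion versus a single open per level after the fix.

#include <stdio.h>

int main(void)
{
        /* Old behaviour: level i of a chain of `depth` nested untracked
         * directories is opened 2^i times, and 2^1 + ... + 2^depth is
         * 2^(depth+1) - 2.  New behaviour: one open per level. */
        for (int depth = 10; depth <= 25; depth++) {
                unsigned long long before = (2ULL << depth) - 2;
                printf("depth %2d: %12llu opens before, %2d after\n",
                       depth, before, depth);
        }
        return 0;
}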
|
|
|
* WARNING: Do not ignore/amend the return value from
|
|
|
|
* treat_directory(), and especially do not change it to return
|
|
|
|
* path_recurse as that can cause exponential slowdown.
|
|
|
|
* Instead, modify treat_directory() to return the right value.
|
2017-10-30 17:21:38 +00:00
|
|
|
*/
|
2020-04-01 04:17:42 +00:00
|
|
|
strbuf_addch(path, '/');
|
|
|
|
return treat_directory(dir, istate, untracked,
|
|
|
|
path->buf, path->len,
|
|
|
|
baselen, excluded, pathspec);
|
2010-01-09 03:14:07 +00:00
|
|
|
case DT_REG:
|
|
|
|
case DT_LNK:
|
Fix error-prone fill_directory() API; make it only return matches
Traditionally, the expected calling convention for the dir.c API was:
fill_directory(&dir, ..., pathspec)
foreach entry in dir->entries:
if (dir_path_match(entry, pathspec))
process_or_display(entry)
This may have made sense once upon a time, because the fill_directory() call
could use cheap checks to avoid doing full pathspec matching, and an external
caller may have wanted to do other post-processing of the results anyway.
However:
* this structure makes it easy for users of the API to get it wrong
* this structure actually makes it harder to understand
fill_directory() and the functions it uses internally. It has
tripped me up several times while trying to fix bugs and
restructure things.
* relying on post-filtering was already found to produce wrong
results; pathspec matching had to be added internally for multiple
cases in order to get the right results (see commits 404ebceda01c
(dir: also check directories for matching pathspecs, 2019-09-17)
and 89a1f4aaf765 (dir: if our pathspec might match files under a
dir, recurse into it, 2019-09-17))
* it's bad for performance: fill_directory() already has to do lots
of checks and knows the subset of cases where it still needs to do
more checks. Forcing external callers to do full pathspec
matching means they must re-check _every_ path.
So, add the pathspec matching within the fill_directory() internals, and
remove it from external callers.
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-01 04:17:45 +00:00
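The same design point can be shown in miniature outside of git: once the producer knows which entries qualify, filtering there once is simpler and cheaper than making every consumer re-run the (possibly subtle) match. A sketch with purely hypothetical names (collect() standing in for fill_directory(), matches() for the pathspec check):

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the real API. */
static int matches(const char *name, const char *want_prefix)
{
        return !strncmp(name, want_prefix, strlen(want_prefix));
}

/* Producer-side filtering: callers only ever see matching entries and
 * never need to duplicate the matching logic themselves. */
static int collect(const char **all, int n, const char *want_prefix,
                   const char **out)
{
        int nr = 0;
        for (int i = 0; i < n; i++)
                if (matches(all[i], want_prefix))
                        out[nr++] = all[i];
        return nr;
}

int main(void)
{
        const char *all[] = { "src/a.c", "src/b.c", "doc/readme" };
        const char *out[3];
        int nr = collect(all, 3, "src/", out);
        for (int i = 0; i < nr; i++)
                printf("%s\n", out[i]);
        return 0;
}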
|
|
|
if (pathspec &&
|
dir: fix treatment of negated pathspecs
do_match_pathspec() started life as match_pathspec_depth_1() and for
correctness was only supposed to be called from match_pathspec_depth().
match_pathspec_depth() was later renamed to match_pathspec(), so the
invariant we expect today is that do_match_pathspec() has no direct
callers outside of match_pathspec().
Unfortunately, this intention was lost with the renames of the two
functions, and additional calls to do_match_pathspec() were added in
commits 75a6315f74 ("ls-files: add pathspec matching for submodules",
2016-10-07) and 89a1f4aaf7 ("dir: if our pathspec might match files
under a dir, recurse into it", 2019-09-17). Of course,
do_match_pathspec() had an important advantage over match_pathspec() --
match_pathspec() would hardcode flags to one of two values, and these
new callers needed to pass some other value for flags. Also, although
calling do_match_pathspec() directly was incorrect, there likely wasn't
any difference in the observable end output, because the bug just meant
that fill_directory() would recurse into unneeded directories. Since
subsequent does-this-path-match checks on individual paths under the
directory would cause those extra paths to be filtered out, the only
difference from using the wrong function was unnecessary computation.
The second of those bad calls to do_match_pathspec() was involved -- via
either direct movement or via copying+editing -- into a number of later
refactors. See commits 777b420347 ("dir: synchronize
treat_leading_path() and read_directory_recursive()", 2019-12-19),
8d92fb2927 ("dir: replace exponential algorithm with a linear one",
2020-04-01), and 95c11ecc73 ("Fix error-prone fill_directory() API; make
it only return matches", 2020-04-01). The last of those introduced the
usage of do_match_pathspec() on an individual file, and thus resulted in
individual paths being returned that shouldn't be.
The problem with calling do_match_pathspec() instead of match_pathspec()
is that any negated patterns, such as ':!unwanted_path', will be ignored.
Add a new match_pathspec_with_flags() function to fulfill the needs of
specifying special flags while still correctly checking negated
patterns, add a big comment above do_match_pathspec() to prevent others
from misusing it, and correct current callers of do_match_pathspec() to
instead use either match_pathspec() or match_pathspec_with_flags().
One final note is that DO_MATCH_LEADING_PATHSPEC needs special
consideration when working with DO_MATCH_EXCLUDE. The point of
DO_MATCH_LEADING_PATHSPEC is that if we have a pathspec like
*/Makefile
and we are checking a directory path like
src/module/component
that we want to consider it a match so that we recurse into the
directory because it _might_ have a file named Makefile somewhere below.
However, when we are using an exclusion pattern, i.e. we have a pathspec
like
:(exclude)*/Makefile
we do NOT want to say that a directory path like
src/module/component
is a (negative) match. While there *might* be a file named 'Makefile'
somewhere below that directory, there could also be other files and we
cannot pre-emptively rule all the files under that directory out; we
need to recurse and then check individual files. Adjust the
DO_MATCH_LEADING_PATHSPEC logic to only get activated for positive
pathspecs.
Reported-by: John Millikin <jmillikin@stripe.com>
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-06-05 18:23:48 +00:00
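The asymmetry between positive and exclude pathspecs can be illustrated with plain POSIX fnmatch() (used here only for illustration; git's real matching goes through wildmatch()/match_pathspec()): the directory path itself does not match '*/Makefile', yet a file somewhere below it does, so a positive leading-path match must recurse and an exclude pattern must not prune the directory pre-emptively.

#include <stdio.h>
#include <fnmatch.h>

int main(void)
{
        const char *pat = "*/Makefile";

        /* The directory itself does not match the pattern... */
        printf("dir matches:  %s\n",
               fnmatch(pat, "src/module/component", 0) == 0 ? "yes" : "no");

        /* ...but a file underneath it does, so the directory can be
         * neither skipped (positive pathspec) nor ruled out entirely
         * (exclude pathspec) without recursing into it. */
        printf("file matches: %s\n",
               fnmatch(pat, "src/module/component/Makefile", 0) == 0 ? "yes" : "no");
        return 0;
}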
|
|
|
!match_pathspec(istate, pathspec, path->buf, path->len,
|
|
|
|
0 /* prefix */, NULL /* seen */,
|
|
|
|
0 /* is_dir */))
|
2020-04-01 04:17:45 +00:00
|
|
|
return path_none;
|
2020-07-20 18:45:29 +00:00
|
|
|
if (excluded)
|
|
|
|
return path_excluded;
|
2020-04-01 04:17:45 +00:00
|
|
|
return path_untracked;
|
2010-01-09 03:14:07 +00:00
|
|
|
}
|
untracked cache: record .gitignore information and dir hierarchy
The idea is that if we can capture all input and (non-recursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.
The requirement for this to work is stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of input of r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on stat info of the directory in question, all .gitignore leading to
it and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.
Two hash_sha1_file() calls are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified,
otherwise their SHA-1 in index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them if their content is different from index, but I think
.gitignore files are rarely modified, so not worth extra cache data
(and the hashing penalty in read-cache.c:verify_hdr(), as we will be storing
this as an index extension).
The implication is, if you change .gitignore, you better add it to the
index soon or you lose all the benefit of untracked cache because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for .gitignore at root.
This cached output is about untracked files only, not ignored files,
because the number of untracked files is usually small (so the cache
overhead is small), while the number of ignored files could go really high
(e.g. *.o files mixing with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:25 +00:00
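That requirement can be sanity-checked directly. A small standalone probe (illustrative, with made-up scratch file names; not part of git) stats a directory, adds an entry, and stats it again to see whether the filesystem bumps the directory's mtime:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

int main(void)
{
        struct stat before, after;
        const char *dir = "uc-probe-dir";              /* hypothetical scratch dir */
        const char *file = "uc-probe-dir/new-entry";   /* hypothetical new entry */
        int fd;

        if (mkdir(dir, 0700) || stat(dir, &before))
                return perror("setup"), 1;

        sleep(1);       /* make a change visible even at 1s mtime resolution */

        fd = open(file, O_CREAT | O_WRONLY, 0600);
        if (fd < 0 || close(fd) || stat(dir, &after))
                return perror("probe"), 1;

        printf("directory mtime %s when an entry was added\n",
               before.st_mtime != after.st_mtime ? "changed" : "did NOT change");

        unlink(file);
        rmdir(dir);
        return 0;
}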
|
|
|
}
|
|
|
|
|
|
|
|
static void add_untracked(struct untracked_cache_dir *dir, const char *name)
|
|
|
|
{
|
|
|
|
if (!dir)
|
|
|
|
return;
|
|
|
|
ALLOC_GROW(dir->untracked, dir->untracked_nr + 1,
|
|
|
|
dir->untracked_alloc);
|
|
|
|
dir->untracked[dir->untracked_nr++] = xstrdup(name);
|
2010-01-09 04:56:16 +00:00
|
|
|
}
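ALLOC_GROW() above is git's amortized array-growth helper. A rough standalone equivalent of the pattern add_untracked() relies on (a sketch of the idea under an assumed growth factor, not git's exact macro) might look like:

#include <stdlib.h>
#include <string.h>

struct name_list {
        char **names;
        size_t nr, alloc;
};

/* Grow geometrically so repeated appends cost amortized O(1), then keep
 * an owned copy of the name -- the same shape as add_untracked() above. */
static void list_append(struct name_list *list, const char *name)
{
        if (list->nr + 1 > list->alloc) {
                size_t want = list->alloc ? list->alloc * 3 / 2 + 16 : 16;
                list->names = realloc(list->names, want * sizeof(*list->names));
                if (!list->names)
                        abort();
                list->alloc = want;
        }
        list->names[list->nr++] = strdup(name);
}

int main(void)
{
        struct name_list list = { 0 };
        list_append(&list, "untracked-a");
        list_append(&list, "untracked-b");
        return (int)list.nr - 2;        /* 0 on success */
}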
|
|
|
|
|
untracked cache: record/validate dir mtime and reuse cached output
The main readdir loop in read_directory_recursive() is replaced with a
new one that checks if cached results of a directory is still valid.
If a file is added or removed from the index, the containing directory
is invalidated (but not its subdirs). If directory's mtime is changed,
the same happens. If a .gitignore is updated, the containing directory
and all subdirs are invalidated recursively. If dir_struct#flags or
other conditions change, the cache is ignored.
If a directory is invalidated, we opendir/readdir/closedir and run the
exclude machinery on that directory listing as usual. If untracked
cache is also enabled, we'll update the cache along the way. If a
directory is validated, we simply pull the untracked listing out from
the cache. The cache also records the list of direct subdirs that we
have to recurse in. Fully excluded directories are seen as "untracked
files".
In the best case when no dirs are invalidated, read_directory()
becomes a series of
stat(dir), open(.gitignore), fstat(), read(), close() and optionally
hash_sha1_file()
For comparison, standard read_directory() is a sequence of
opendir(), readdir(), open(.gitignore), fstat(), read(), close(), the
expensive last_exclude_matching() and closedir().
We already try not to open(.gitignore) if we know it does not exist,
so open/fstat/read/close sequence does not apply to every
directory. The sequence could be reduced further, as noted in
prep_exclude() in another patch. So in theory, the entire best-case
read_directory sequence could be reduced to a series of stat() and
nothing else.
This is not a silver bullet approach. When you compile a C file, for
example, the old .o file is removed and a new one with the same name
created, effectively invalidating the containing directory's cache
(but not its subdirectories). If your build process touches every
directory, this cache adds extra overhead for nothing, so it's a good
idea to separate generated files from tracked files. Editors may use
the same strategy for saving files. And of course you're out of luck
running your repo on an unsupported filesystem and/or operating system.
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:29 +00:00
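The heart of valid_cached_dir() below is: lstat() the directory, compare against the saved stat data, and only reuse the cached listing when nothing changed. A stripped-down standalone sketch of just that step (hypothetical struct, not git's untracked_cache_dir, which additionally consults fsmonitor and the index):

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Hypothetical cached-directory record: just enough stat data to notice
 * that entries were added or removed since the listing was cached. */
struct cached_listing {
        int valid;
        time_t mtime;
        ino_t ino;
        off_t size;
};

/* Return 1 if the cached listing for `path` may be reused, 0 if the
 * caller must fall back to opendir()/readdir() and refresh the cache. */
static int listing_still_valid(struct cached_listing *c, const char *path)
{
        struct stat st;

        if (lstat(path, &st))
                return 0;                       /* cannot stat: treat as stale */
        if (!c->valid || c->mtime != st.st_mtime ||
            c->ino != st.st_ino || c->size != st.st_size) {
                c->mtime = st.st_mtime;         /* remember for next time */
                c->ino = st.st_ino;
                c->size = st.st_size;
                return 0;
        }
        return 1;
}

int main(void)
{
        struct cached_listing c = { 0 };
        printf("first check:  %d\n", listing_still_valid(&c, "."));
        c.valid = 1;    /* the caller marks it valid after refreshing the listing */
        printf("second check: %d\n", listing_still_valid(&c, "."));
        return 0;
}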
|
|
|
static int valid_cached_dir(struct dir_struct *dir,
|
|
|
|
struct untracked_cache_dir *untracked,
|
2017-05-05 19:53:31 +00:00
|
|
|
struct index_state *istate,
|
2015-03-08 10:12:29 +00:00
|
|
|
struct strbuf *path,
|
|
|
|
int check_only)
|
|
|
|
{
|
|
|
|
struct stat st;
|
|
|
|
|
|
|
|
if (!untracked)
|
|
|
|
return 0;
|
|
|
|
|
2017-09-22 16:35:40 +00:00
|
|
|
/*
|
|
|
|
* With fsmonitor, we can trust the untracked cache's valid field.
|
|
|
|
*/
|
|
|
|
refresh_fsmonitor(istate);
|
|
|
|
if (!(dir->untracked->use_fsmonitor && untracked->valid)) {
|
2018-01-24 09:30:20 +00:00
|
|
|
if (lstat(path->len ? path->buf : ".", &st)) {
|
2017-09-22 16:35:40 +00:00
|
|
|
memset(&untracked->stat_data, 0, sizeof(untracked->stat_data));
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
if (!untracked->valid ||
|
|
|
|
match_stat_data_racy(istate, &untracked->stat_data, &st)) {
|
|
|
|
fill_stat_data(&untracked->stat_data, &st);
|
|
|
|
return 0;
|
|
|
|
}
|
2015-03-08 10:12:29 +00:00
|
|
|
}
|
|
|
|
|
dir.c: fix missing dir invalidation in untracked code
Let's start with how we create a new directory cache after the last one
becomes invalid (e.g. because its dir mtime has changed...). In
open_cached_dir():
1. We start out with valid_cached_dir() returning false, which should
call invalidate_directory() to put a directory state back to
initial state: no untracked entries (untracked_nr zero), no
subdirectory traversal (dirs[].recurse zero).
2. Since the cache cannot be used, we go the slow path opendir() and
go through items one by one via readdir(). All the directories on
disk will be added back to the cache (if not already exist in
dirs[]) and its flag "recurse" gets changed to one to note that
it's part of the cached dir travesal next time.
3. By the time we reach close_cached_dir() we should have a good
subdir list in dirs[]. Those with "recurse" flag set are the ones
present in the on-disk directory. The directory is now marked
"valid".
Next time read_directory() is called, since the directory is marked
valid, it will skip readdir(), go fast path and traverse through
dirs[] array instead.
Steps one and two need some tight cooperation. If a subdir is removed,
readdir() will not find it and of course we cannot examine/invalidate
it. To make sure removed directories on disk are gone from the cache,
step one must make sure the recurse flags of all subdirs are zero.
But that's not true. If the "valid" flag is already false, there is a
chance we go straight to the end of valid_cached_dir() without calling
invalidate_directory(). Or we fail to meet the "if (untracked->valid)"
condition and skip over the invalidate_directory() call.
After step 3, we mark the cache valid. Any stale subdir with incorrect
recurse flag becomes a real subdir next time we traverse the directory
using dirs[] array.
We could avoid this by making sure invalidate_directory() is always
called (therefore dirs[].recurse cleared) at the beginning of
open_cached_dir(). Which is what this patch does.
As to how we get into this situation, the key in the test is this
command
git checkout master
where "one/file" is replaced with "one" in the index. This index
update triggers untracked_cache_invalidate_path(), which clears valid
flag of the root directory while keeping "recurse" flag on the subdir
"one" on. On the next git-status, we go through steps 1-3 above and
save an incorrect cache on disk. The second git-status blindly follows
the bad cache data and shows the problem.
This is arguably because of a bad design where the "recurse" flag plays
two roles: whether a directory should be saved on disk, and whether
it is part of a directory traversal.
We need to keep recurse flag set at "checkout master" because of the
first role: we need to keep subdir caches (dir "two" for example has
not been touched at all, no reason to throw its cache away).
As long as we make sure to ignore/reset "recurse" flag at the
beginning of a directory traversal, we're good. But maybe eventually
we should separate these two roles.
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-01-24 09:30:21 +00:00
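The fix amounts to a simple pattern: before every fresh scan, unconditionally clear the per-subdir "recurse" marks, then re-mark only the subdirs the scan actually saw, so an entry that vanished from disk cannot survive as stale cache state. A tiny standalone sketch of that pattern (hypothetical types; the real code achieves it by always calling invalidate_directory() at the start of open_cached_dir()):

#include <stdio.h>
#include <string.h>

struct subdir { char name[32]; int recurse; };

/* Step 1: unconditionally clear the marks before a fresh scan... */
static void clear_marks(struct subdir *dirs, int nr)
{
        for (int i = 0; i < nr; i++)
                dirs[i].recurse = 0;
}

/* ...step 2: re-mark only what the scan actually saw, so a subdir that
 * disappeared from disk can never keep a stale mark. */
static void mark_seen(struct subdir *dirs, int nr, const char *seen)
{
        for (int i = 0; i < nr; i++)
                if (!strcmp(dirs[i].name, seen))
                        dirs[i].recurse = 1;
}

int main(void)
{
        struct subdir dirs[] = { { "one", 1 }, { "two", 1 } };
        clear_marks(dirs, 2);
        mark_seen(dirs, 2, "two");      /* pretend only "two" still exists */
        for (int i = 0; i < 2; i++)
                printf("%s: recurse=%d\n", dirs[i].name, dirs[i].recurse);
        return 0;
}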
|
|
|
if (untracked->check_only != !!check_only)
|
2015-03-08 10:12:29 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* prep_exclude will be called eventually on this directory,
|
2019-09-03 18:04:57 +00:00
|
|
|
* but it's called much later in last_matching_pattern(). We
|
2015-03-08 10:12:29 +00:00
|
|
|
* need it now to determine the validity of the cache for this
|
|
|
|
* path. The next calls will be nearly no-op, the way
|
|
|
|
* prep_exclude() is designed.
|
|
|
|
*/
|
|
|
|
if (path->len && path->buf[path->len - 1] != '/') {
|
|
|
|
strbuf_addch(path, '/');
|
2017-05-05 19:53:31 +00:00
|
|
|
prep_exclude(dir, istate, path->buf, path->len);
|
2015-03-08 10:12:29 +00:00
|
|
|
strbuf_setlen(path, path->len - 1);
|
|
|
|
} else
|
2017-05-05 19:53:31 +00:00
|
|
|
prep_exclude(dir, istate, path->buf, path->len);
|
2015-03-08 10:12:29 +00:00
|
|
|
|
|
|
|
	/* hopefully prep_exclude() hasn't invalidated this entry... */
|
|
|
|
return untracked->valid;
|
|
|
|
}
|
|
|
|
|
2015-03-08 10:12:28 +00:00
|
|
|
static int open_cached_dir(struct cached_dir *cdir,
|
|
|
|
struct dir_struct *dir,
|
|
|
|
struct untracked_cache_dir *untracked,
|
2017-05-05 19:53:31 +00:00
|
|
|
struct index_state *istate,
|
2015-03-08 10:12:28 +00:00
|
|
|
struct strbuf *path,
|
|
|
|
int check_only)
|
|
|
|
{
|
2018-01-24 09:30:23 +00:00
|
|
|
const char *c_path;
|
|
|
|
|
2015-03-08 10:12:28 +00:00
|
|
|
memset(cdir, 0, sizeof(*cdir));
|
|
|
|
cdir->untracked = untracked;
|
2017-05-05 19:53:31 +00:00
|
|
|
if (valid_cached_dir(dir, untracked, istate, path, check_only))
|
2015-03-08 10:12:29 +00:00
|
|
|
return 0;
|
2018-01-24 09:30:23 +00:00
|
|
|
c_path = path->len ? path->buf : ".";
|
|
|
|
cdir->fdir = opendir(c_path);
|
|
|
|
if (!cdir->fdir)
|
|
|
|
warning_errno(_("could not open directory '%s'"), c_path);
|
dir.c: fix missing dir invalidation in untracked code
Let's start with how we create a new directory cache after the last one
becomes invalid (e.g. because its dir mtime has changed...). In
open_cached_dir():
1. We start out with valid_cached_dir() returning false, which should
call invalidate_directory() to put a directory state back to
initial state, no untracked entries (untracked_nr zero), no sub
directory traversal (dirs[].recurse zero).
2. Since the cache cannot be used, we take the slow path: opendir() and
go through items one by one via readdir(). All the directories on
disk will be added back to the cache (if they do not already exist in
dirs[]) and their "recurse" flag is set to one to note that
they are part of the cached dir traversal next time.
3. By the time we reach close_cached_dir() we should have a good
subdir list in dirs[]. Those with "recurse" flag set are the ones
present in the on-disk directory. The directory is now marked
"valid".
Next time read_directory() is called, since the directory is marked
valid, it will skip readdir(), go fast path and traverse through
dirs[] array instead.
Steps one and two need some tight cooperation. If a subdir is removed,
readdir() will not find it and of course we cannot examine/invalidate
it. To make sure removed directories on disk are gone from the cache,
step one must make sure the recurse flags of all subdirs are zero.
But that's not true. If the "valid" flag is already false, there is a
chance we go straight to the end of valid_cached_dir() without calling
invalidate_directory(). Or we fail to meet the "if (untracked->valid)"
condition and skip over the invalidate_directory().
After step 3, we mark the cache valid. Any stale subdir with incorrect
recurse flag becomes a real subdir next time we traverse the directory
using dirs[] array.
We could avoid this by making sure invalidate_directory() is always
called (therefore dirs[].recurse cleared) at the beginning of
open_cached_dir(). Which is what this patch does.
As to how we get into this situation, the key in the test is this
command
git checkout master
where "one/file" is replaced with "one" in the index. This index
update triggers untracked_cache_invalidate_path(), which clears valid
flag of the root directory while keeping "recurse" flag on the subdir
"one" on. On the next git-status, we go through steps 1-3 above and
save an incorrect cache on disk. The second git-status blindly follows
the bad cache data and shows the problem.
This is arguably because of a bad design where the "recurse" flag plays
two roles: whether a directory should be saved on disk, and whether
it is part of a directory traversal.
We need to keep recurse flag set at "checkout master" because of the
first role: we need to keep subdir caches (dir "two" for example has
not been touched at all, no reason to throw its cache away).
As long as we make sure to ignore/reset "recurse" flag at the
beginning of a directory traversal, we're good. But maybe eventually
we should separate these two roles.
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-01-24 09:30:21 +00:00
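The fix above can be summarized with a small self-contained model. The types and helpers here are simplified stand-ins, not the real untracked_cache_dir from dir.h: resetting a directory must also clear every subdirectory's recurse flag, so that only subdirs actually seen by readdir() afterwards are marked for the next cached traversal.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the untracked-cache structures. */
struct ucd {
	const char *name;
	int recurse;            /* part of the cached traversal next time? */
	int valid;              /* cached listing may be reused */
	size_t untracked_nr;    /* number of cached untracked entries */
	size_t dirs_nr;
	struct ucd **dirs;      /* direct subdirectories */
};

/* Reset a directory to its initial state: no untracked entries and no
 * subdirectory marked for traversal.  Only subdirectories actually seen
 * by readdir() afterwards get their recurse flag set again. */
static void invalidate_directory(struct ucd *dir)
{
	size_t i;

	dir->valid = 0;
	dir->untracked_nr = 0;
	for (i = 0; i < dir->dirs_nr; i++)
		dir->dirs[i]->recurse = 0;
}

int main(void)
{
	struct ucd one = { "one", 1, 0, 0, 0, NULL };
	struct ucd two = { "two", 1, 1, 0, 0, NULL };
	struct ucd *subs[] = { &one, &two };
	struct ucd root = { "", 1, 0, 3, 2, subs };
	size_t i;

	/* Before the slow opendir()/readdir() path, clear everything so a
	 * subdir that vanished from disk cannot keep a stale recurse flag. */
	invalidate_directory(&root);

	for (i = 0; i < root.dirs_nr; i++)
		printf("%s: recurse=%d\n", subs[i]->name, subs[i]->recurse);
	return 0;
}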
|
|
|
if (dir->untracked) {
|
|
|
|
invalidate_directory(dir->untracked, untracked);
|
2015-03-08 10:12:29 +00:00
|
|
|
dir->untracked->dir_opened++;
|
2018-01-24 09:30:21 +00:00
|
|
|
}
|
2015-03-08 10:12:28 +00:00
|
|
|
if (!cdir->fdir)
|
|
|
|
return -1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int read_cached_dir(struct cached_dir *cdir)
|
|
|
|
{
|
2020-01-16 20:21:55 +00:00
|
|
|
struct dirent *de;
|
|
|
|
|
2015-03-08 10:12:28 +00:00
|
|
|
if (cdir->fdir) {
|
2021-05-12 17:28:22 +00:00
|
|
|
de = readdir_skip_dot_and_dotdot(cdir->fdir);
|
2020-01-16 20:21:55 +00:00
|
|
|
if (!de) {
|
|
|
|
cdir->d_name = NULL;
|
|
|
|
cdir->d_type = DT_UNKNOWN;
|
2015-03-08 10:12:28 +00:00
|
|
|
return -1;
|
2020-01-16 20:21:55 +00:00
|
|
|
}
|
|
|
|
cdir->d_name = de->d_name;
|
|
|
|
cdir->d_type = DTYPE(de);
|
2015-03-08 10:12:28 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2015-03-08 10:12:29 +00:00
|
|
|
while (cdir->nr_dirs < cdir->untracked->dirs_nr) {
|
|
|
|
struct untracked_cache_dir *d = cdir->untracked->dirs[cdir->nr_dirs];
|
2015-03-08 10:12:30 +00:00
|
|
|
if (!d->recurse) {
|
|
|
|
cdir->nr_dirs++;
|
|
|
|
continue;
|
|
|
|
}
|
2015-03-08 10:12:29 +00:00
|
|
|
cdir->ucd = d;
|
|
|
|
cdir->nr_dirs++;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
cdir->ucd = NULL;
|
|
|
|
if (cdir->nr_files < cdir->untracked->untracked_nr) {
|
|
|
|
struct untracked_cache_dir *d = cdir->untracked;
|
|
|
|
cdir->file = d->untracked[cdir->nr_files++];
|
|
|
|
return 0;
|
|
|
|
}
|
2015-03-08 10:12:28 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void close_cached_dir(struct cached_dir *cdir)
|
|
|
|
{
|
|
|
|
if (cdir->fdir)
|
|
|
|
closedir(cdir->fdir);
|
2015-03-08 10:12:29 +00:00
|
|
|
/*
|
|
|
|
* We have gone through this directory and found no untracked
|
|
|
|
* entries. Mark it valid.
|
|
|
|
*/
|
2015-03-08 10:12:30 +00:00
|
|
|
if (cdir->untracked) {
|
2015-03-08 10:12:29 +00:00
|
|
|
cdir->untracked->valid = 1;
|
2015-03-08 10:12:30 +00:00
|
|
|
cdir->untracked->recurse = 1;
|
|
|
|
}
|
2010-01-09 04:56:16 +00:00
|
|
|
}
|
|
|
|
|
2019-12-10 20:00:24 +00:00
|
|
|
static void add_path_to_appropriate_result_list(struct dir_struct *dir,
|
|
|
|
struct untracked_cache_dir *untracked,
|
|
|
|
struct cached_dir *cdir,
|
|
|
|
struct index_state *istate,
|
|
|
|
struct strbuf *path,
|
|
|
|
int baselen,
|
|
|
|
const struct pathspec *pathspec,
|
|
|
|
enum path_treatment state)
|
|
|
|
{
|
|
|
|
/* add the path to the appropriate result list */
|
|
|
|
switch (state) {
|
|
|
|
case path_excluded:
|
|
|
|
if (dir->flags & DIR_SHOW_IGNORED)
|
|
|
|
dir_add_name(dir, istate, path->buf, path->len);
|
|
|
|
else if ((dir->flags & DIR_SHOW_IGNORED_TOO) ||
|
|
|
|
((dir->flags & DIR_COLLECT_IGNORED) &&
|
|
|
|
exclude_matches_pathspec(path->buf, path->len,
|
|
|
|
pathspec)))
|
|
|
|
dir_add_ignored(dir, istate, path->buf, path->len);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case path_untracked:
|
|
|
|
if (dir->flags & DIR_SHOW_IGNORED)
|
|
|
|
break;
|
|
|
|
dir_add_name(dir, istate, path->buf, path->len);
|
|
|
|
if (cdir->fdir)
|
|
|
|
add_untracked(untracked, path->buf + baselen);
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-05-17 02:02:14 +00:00
|
|
|
/*
|
|
|
|
* Read a directory tree. We currently ignore anything but
|
|
|
|
* directories, regular files and symlinks. That's because git
|
|
|
|
* doesn't handle them at all yet. Maybe that will change some
|
|
|
|
* day.
|
|
|
|
*
|
|
|
|
* Also, we ignore the name ".git" (even if it is not a directory).
|
|
|
|
* That likely will not change.
|
dir.c: git-status --ignored: don't scan the work tree three times
'git-status --ignored' recursively scans directories up to three times:
1. To collect untracked files.
2. To collect ignored files.
3. When collecting ignored files, to check that an untracked directory
that potentially contains ignored files doesn't also contain untracked
files (i.e. isn't already listed as untracked).
Let's get rid of case 3 first.
Currently, read_directory_recursive returns a boolean whether a directory
contains the requested files or not (actually, it returns the number of
files, but no caller actually needs that), and DIR_SHOW_IGNORED specifies
what we're looking for.
To be able to test for both untracked and ignored files in a single scan,
we need to return a bit more info, and the result must be independent of
the DIR_SHOW_IGNORED flag.
Reuse the path_treatment enum as return value of read_directory_recursive.
Split path_handled in two separate values path_excluded and path_untracked
that don't change their meaning with the DIR_SHOW_IGNORED flag. We don't
need an extra value path_untracked_and_excluded, as directories with both
untracked and ignored files should be listed as untracked.
Rename path_ignored to path_none for clarity (i.e. "don't treat that path"
in contrast to "the path is ignored and should be treated according to
DIR_SHOW_IGNORED").
Replace enum directory_treatment with path_treatment. That's just another
enum with the same meaning, no need to translate back and forth.
In treat_directory, get rid of the extra read_directory_recursive call and
all the DIR_SHOW_IGNORED-specific code.
In read_directory_recursive, decide whether to dir_add_name path_excluded
or path_untracked paths based on the DIR_SHOW_IGNORED flag.
The return value of read_directory_recursive is the maximum path_treatment
of all files and sub-directories. In the check_only case, abort when we've
reached the most significant value (path_untracked).
Signed-off-by: Karsten Blees <blees@dcon.de>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-04-15 19:14:22 +00:00
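For reference, a compact sketch of the return-value convention described above: treatments are ordered by significance and a directory's result is the maximum over its entries, stopping early in check_only mode once the most significant value is seen. The enum names follow the message; the aggregation helper is illustrative, not the real read_directory_recursive().

#include <stdio.h>

/* Ordered by significance, as described in the commit message. */
enum path_treatment {
	path_none = 0,
	path_recurse,
	path_excluded,
	path_untracked
};

/* Aggregate a directory's treatment from its entries: keep the maximum,
 * and in check_only mode stop as soon as the most significant value
 * (path_untracked) has been seen. */
static enum path_treatment aggregate(const enum path_treatment *entries,
				     int nr, int check_only)
{
	enum path_treatment dir_state = path_none;
	int i;

	for (i = 0; i < nr; i++) {
		if (entries[i] > dir_state)
			dir_state = entries[i];
		if (check_only && dir_state == path_untracked)
			break;
	}
	return dir_state;
}

int main(void)
{
	enum path_treatment entries[] = { path_excluded, path_none, path_untracked };

	printf("dir state: %d (3 == path_untracked)\n",
	       aggregate(entries, 3, 1));
	return 0;
}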
|
|
|
*
|
2017-09-18 17:24:33 +00:00
|
|
|
* If 'stop_at_first_file' is specified, 'path_excluded' is returned
|
|
|
|
* to signal that a file was found. This is the least significant value that
|
|
|
|
 * indicates that a file was encountered; the result does not depend on
|
2020-04-01 04:17:37 +00:00
|
|
|
* whether an untracked or excluded path was encountered first.
|
2017-09-18 17:24:33 +00:00
|
|
|
*
|
2013-04-15 19:14:22 +00:00
|
|
|
* Returns the most significant path_treatment value encountered in the scan.
|
2017-09-18 17:24:33 +00:00
|
|
|
* If 'stop_at_first_file' is specified, `path_excluded` is the most
|
|
|
|
* significant path_treatment value that will be returned.
|
2006-05-17 02:02:14 +00:00
|
|
|
*/
|
2017-09-18 17:24:33 +00:00
|
|
|
|
2013-04-15 19:14:22 +00:00
|
|
|
static enum path_treatment read_directory_recursive(struct dir_struct *dir,
|
2017-05-05 19:53:32 +00:00
|
|
|
struct index_state *istate, const char *base, int baselen,
|
|
|
|
struct untracked_cache_dir *untracked, int check_only,
|
2017-09-18 17:24:33 +00:00
|
|
|
int stop_at_first_file, const struct pathspec *pathspec)
|
2006-05-17 02:02:14 +00:00
|
|
|
{
|
2019-12-19 21:28:25 +00:00
|
|
|
/*
|
dir: replace exponential algorithm with a linear one
dir's read_directory_recursive() naturally operates recursively in order
to walk the directory tree. The treatment of directories is sometimes weird
because there are so many different permutations of how to handle
directories. Some examples:
* 'git ls-files -o --directory' only needs to know that a directory
itself is untracked; it doesn't need to recurse into it to see what
is underneath.
* 'git status' needs to recurse into an untracked directory, but only
to determine whether or not it is empty. If there are no files
underneath, the directory itself will be omitted from the output.
If it is not empty, only the directory will be listed.
* 'git status --ignored' needs to recurse into untracked directories
and report all the ignored entries and then report the directory as
untracked -- UNLESS all the entries under the directory are
ignored, in which case we don't print any of the entries under the
directory and just report the directory itself as ignored. (Note
that although this forces us to walk all untracked files underneath
the directory as well, we strip them from the output, except for
users like 'git clean' who also set DIR_KEEP_TRACKED_CONTENTS.)
* For 'git clean', we may need to recurse into a directory that
doesn't match any specified pathspecs, if it's possible that there
is an entry underneath the directory that can match one of the
pathspecs. In such a case, we need to be careful to omit the
directory itself from the list of paths (see commit 404ebceda01c
("dir: also check directories for matching pathspecs", 2019-09-17))
Part of the tension noted above is that the treatment of a directory can
change based on the files within it, and based on the various settings
in dir->flags. Trying to keep this in mind while reading over the code,
it is easy to think in terms of "treat_directory() tells us what to do
with a directory, and read_directory_recursive() is the thing that
recurses". Since we need to look into a directory to know how to treat
it, though, it is quite easy to decide to (also) recurse into the
directory from treat_directory() by adding a read_directory_recursive()
call. Adding such a call is actually fine, IF we make sure that
read_directory_recursive() does not also recurse into that same
directory.
Unfortunately, commit df5bcdf83aeb ("dir: recurse into untracked dirs
for ignored files", 2017-05-18), added exactly such a case to the code,
meaning we'd have two calls to read_directory_recursive() for an
untracked directory. So, if we had a file named
one/two/three/four/five/somefile.txt
and nothing in one/ was tracked, then 'git status --ignored' would
call read_directory_recursive() twice on the directory 'one/', and
each of those would call read_directory_recursive() twice on the
directory 'one/two/', and so on until read_directory_recursive() was
called 2^5 times for 'one/two/three/four/five/'.
Avoid calling read_directory_recursive() twice per level by moving a
lot of the special logic into treat_directory().
Since dir.c is somewhat complex, extra cruft built up around this over
time. While trying to unravel it, I noticed several instances where the
first call to read_directory_recursive() would return e.g.
path_untracked for some directory and a later one would return e.g.
path_none, despite the fact that the directory clearly should have been
considered untracked. The code happened to work due to the side-effect
from the first invocation of adding untracked entries to dir->entries;
this allowed it to get the correct output despite the supposed override
in return value by the later call.
I am somewhat concerned that there are still bugs and maybe even
testcases with the wrong expectation. I have tried to carefully
document treat_directory() since it becomes more complex after this
change (though much of this complexity came from elsewhere that probably
deserved better comments to begin with). However, much of my work felt
more like a game of whackamole while attempting to make the code match
the existing regression tests than an attempt to create an
implementation that matched some clear design. That seems wrong to me,
but the rules of existing behavior had so many special cases that I had
a hard time coming up with some overarching rules about what correct
behavior is for all cases, forcing me to hope that the regression tests
are correct and sufficient. Such a hope seems likely to be ill-founded,
given my experience with dir.c-related testcases in the last few months:
Examples where the documentation was hard to parse or even just wrong:
* 3aca58045f4f (git-clean.txt: do not claim we will delete files with
-n/--dry-run, 2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
Examples where testcases were declared wrong and changed:
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* a2b13367fe55 (Revert "dir.c: make 'git-status --ignored' work within
leading directories", 2019-12-10)
Examples where testcases were clearly inadequate:
* 502c386ff944 (t7300-clean: demonstrate deleting nested repo with an
ignored file breakage, 2019-08-25)
* 7541cc530239 (t7300: add testcases showing failure to clean specified
pathspecs, 2019-09-17)
* a5e916c7453b (dir: fix off-by-one error in match_pathspec_item,
2019-09-17)
* 404ebceda01c (dir: also check directories for matching pathspecs,
2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* 452efd11fbf6 (t3011: demonstrate directory traversal failures,
2019-12-10)
* b9670c1f5e6b (dir: fix checks on common prefix directory, 2019-12-19)
Examples where "correct behavior" was unclear to everyone:
https://lore.kernel.org/git/20190905154735.29784-1-newren@gmail.com/
Other commits of note:
* 902b90cf42bc (clean: fix theoretical path corruption, 2019-09-17)
However, on the positive side, it does make the code much faster. For
the following simple shell loop in an empty repository:
for depth in $(seq 10 25)
do
dirs=$(for i in $(seq 1 $depth) ; do printf 'dir/' ; done)
rm -rf dir
mkdir -p $dirs
>$dirs/untracked-file
/usr/bin/time --format="$depth: %e" git status --ignored >/dev/null
done
I saw the following timings, in seconds (note that the numbers are a
little noisy from run-to-run, but the trend is very clear with every
run):
10: 0.03
11: 0.05
12: 0.08
13: 0.19
14: 0.29
15: 0.50
16: 1.05
17: 2.11
18: 4.11
19: 8.60
20: 17.55
21: 33.87
22: 68.71
23: 140.05
24: 274.45
25: 551.15
For the above run, using strace I can look for the number of untracked
directories opened and can verify that it matches the expected
2^($depth+1)-2 (the sum of 2^1 + 2^2 + 2^3 + ... + 2^$depth).
After this fix, with strace I can verify that the number of untracked
directories that are opened drops to just $depth, and the timings all
drop to 0.00. In fact, it isn't until a depth of 190 nested directories
that it sometimes starts reporting a time of 0.01 seconds and doesn't
consistently report 0.01 seconds until there are 240 nested directories.
The previous code would have taken
17.55 * 2^220 / (60*60*24*365) = 9.4 * 10^59 YEARS
to have completed the 240 nested directories case. It's not often
that you get to speed something up by a factor of 3*10^69.
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-01 04:17:42 +00:00
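To make the 2^depth blow-up concrete, here is a small standalone model (not Git code): entering every nesting level twice doubles the work at each step, while entering it once keeps the total linear in the depth.

#include <stdio.h>

static long visits;

/* Model of the buggy shape: treat_directory() triggered one recursive
 * scan and read_directory_recursive() then recursed again, so every
 * level is entered twice per entry into its parent. */
static void scan_twice(int depth)
{
	visits++;
	if (!depth)
		return;
	scan_twice(depth - 1);
	scan_twice(depth - 1);
}

/* Model of the fixed shape: each level is scanned exactly once. */
static void scan_once(int depth)
{
	visits++;
	if (!depth)
		return;
	scan_once(depth - 1);
}

int main(void)
{
	int depth = 20;

	visits = 0;
	scan_twice(depth);
	printf("double recursion, depth %d: %ld visits\n", depth, visits);

	visits = 0;
	scan_once(depth);
	printf("single recursion, depth %d: %ld visits\n", depth, visits);
	return 0;
}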
|
|
|
* WARNING: Do NOT recurse unless path_recurse is returned from
|
|
|
|
* treat_path(). Recursing on any other return value
|
|
|
|
* can result in exponential slowdown.
|
2019-12-19 21:28:25 +00:00
|
|
|
*/
|
2015-03-08 10:12:28 +00:00
|
|
|
struct cached_dir cdir;
|
2013-04-15 19:14:22 +00:00
|
|
|
enum path_treatment state, subdir_state, dir_state = path_none;
|
2012-05-08 16:43:40 +00:00
|
|
|
struct strbuf path = STRBUF_INIT;
|
2006-05-17 02:02:14 +00:00
|
|
|
|
2012-05-08 16:43:40 +00:00
|
|
|
strbuf_add(&path, base, baselen);
|
2011-10-24 06:36:11 +00:00
|
|
|
|
2017-05-05 19:53:32 +00:00
|
|
|
if (open_cached_dir(&cdir, dir, untracked, istate, &path, check_only))
|
2012-05-11 14:53:07 +00:00
|
|
|
goto out;
|
2023-02-27 15:28:10 +00:00
|
|
|
dir->internal.visited_directories++;
|
2012-05-11 14:53:07 +00:00
|
|
|
|
untracked cache: record .gitignore information and dir hierarchy
The idea is that if we can capture all the input and (non-recursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.
The requirement for this to work is stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of input of r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on stat info of the directory in question, all .gitignore leading to
it and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.
Two hash_sha1_file() calls are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified,
otherwise their SHA-1 in index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them if their content is different from index, but I think
.gitignore files are rarely modified, so not worth extra cache data
(and a hashing penalty in read-cache.c:verify_hdr(), as we will be storing
this as an index extension).
The implication is, if you change .gitignore, you better add it to the
index soon or you lose all the benefit of untracked cache because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for .gitignore at root.
This cached output is about untracked files only, not ignored files,
because the number of untracked files is usually small (so the cache
overhead is small), while the number of ignored files could go really high
(e.g. *.o files mixing with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:25 +00:00
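The filesystem requirement above can be probed with a short standalone test. This is only a rough smoke test, not how Git decides whether the untracked cache is safe to use: stat a directory, add an entry, and check whether the directory's mtime changed.

#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>

int main(void)
{
	const char *dir = "uc-probe-dir";
	const char *file = "uc-probe-dir/entry";
	struct stat before, after;
	FILE *f;

	if (mkdir(dir, 0700) < 0 || stat(dir, &before) < 0)
		return 1;

	sleep(1);                 /* outlast one-second mtime granularity */
	f = fopen(file, "w");     /* add an entry to the directory */
	if (!f)
		return 1;
	fclose(f);
	if (stat(dir, &after) < 0)
		return 1;

	printf("directory mtime %s when an entry was added\n",
	       after.st_mtime != before.st_mtime ? "changed" : "did NOT change");

	unlink(file);
	rmdir(dir);
	return 0;
}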
|
|
|
if (untracked)
|
|
|
|
untracked->check_only = !!check_only;
|
|
|
|
|
2015-03-08 10:12:28 +00:00
|
|
|
while (!read_cached_dir(&cdir)) {
|
2013-04-15 19:14:22 +00:00
|
|
|
/* check how the file or directory should be treated */
|
2017-05-05 19:53:32 +00:00
|
|
|
state = treat_path(dir, untracked, &cdir, istate, &path,
|
2017-01-04 18:03:57 +00:00
|
|
|
baselen, pathspec);
|
2023-02-27 15:28:10 +00:00
|
|
|
dir->internal.visited_paths++;
|
2015-03-08 10:12:25 +00:00
|
|
|
|
2013-04-15 19:14:22 +00:00
|
|
|
if (state > dir_state)
|
|
|
|
dir_state = state;
|
|
|
|
|
|
|
|
/* recurse into subdir if instructed by treat_path */
|
dir: replace exponential algorithm with a linear one
dir's read_directory_recursive() naturally operates recursively in order
to walk the directory tree. Treating of directories is sometimes weird
because there are so many different permutations about how to handle
directories. Some examples:
* 'git ls-files -o --directory' only needs to know that a directory
itself is untracked; it doesn't need to recurse into it to see what
is underneath.
* 'git status' needs to recurse into an untracked directory, but only
to determine whether or not it is empty. If there are no files
underneath, the directory itself will be omitted from the output.
If it is not empty, only the directory will be listed.
* 'git status --ignored' needs to recurse into untracked directories
and report all the ignored entries and then report the directory as
untracked -- UNLESS all the entries under the directory are
ignored, in which case we don't print any of the entries under the
directory and just report the directory itself as ignored. (Note
that although this forces us to walk all untracked files underneath
the directory as well, we strip them from the output, except for
users like 'git clean' who also set DIR_KEEP_TRACKED_CONTENTS.)
* For 'git clean', we may need to recurse into a directory that
doesn't match any specified pathspecs, if it's possible that there
is an entry underneath the directory that can match one of the
pathspecs. In such a case, we need to be careful to omit the
directory itself from the list of paths (see commit 404ebceda01c
("dir: also check directories for matching pathspecs", 2019-09-17))
Part of the tension noted above is that the treatment of a directory can
change based on the files within it, and based on the various settings
in dir->flags. Trying to keep this in mind while reading over the code,
it is easy to think in terms of "treat_directory() tells us what to do
with a directory, and read_directory_recursive() is the thing that
recurses". Since we need to look into a directory to know how to treat
it, though, it is quite easy to decide to (also) recurse into the
directory from treat_directory() by adding a read_directory_recursive()
call. Adding such a call is actually fine, IF we make sure that
read_directory_recursive() does not also recurse into that same
directory.
Unfortunately, commit df5bcdf83aeb ("dir: recurse into untracked dirs
for ignored files", 2017-05-18), added exactly such a case to the code,
meaning we'd have two calls to read_directory_recursive() for an
untracked directory. So, if we had a file named
one/two/three/four/five/somefile.txt
and nothing in one/ was tracked, then 'git status --ignored' would
call read_directory_recursive() twice on the directory 'one/', and
each of those would call read_directory_recursive() twice on the
directory 'one/two/', and so on until read_directory_recursive() was
called 2^5 times for 'one/two/three/four/five/'.
Avoid calling read_directory_recursive() twice per level by moving a
lot of the special logic into treat_directory().
Since dir.c is somewhat complex, extra cruft built up around this over
time. While trying to unravel it, I noticed several instances where the
first call to read_directory_recursive() would return e.g.
path_untracked for some directory and a later one would return e.g.
path_none, despite the fact that the directory clearly should have been
considered untracked. The code happened to work due to the side-effect
from the first invocation of adding untracked entries to dir->entries;
this allowed it to get the correct output despite the supposed override
in return value by the later call.
I am somewhat concerned that there are still bugs and maybe even
testcases with the wrong expectation. I have tried to carefully
document treat_directory() since it becomes more complex after this
change (though much of this complexity came from elsewhere that probably
deserved better comments to begin with). However, much of my work felt
more like a game of whackamole while attempting to make the code match
the existing regression tests than an attempt to create an
implementation that matched some clear design. That seems wrong to me,
but the rules of existing behavior had so many special cases that I had
a hard time coming up with some overarching rules about what correct
behavior is for all cases, forcing me to hope that the regression tests
are correct and sufficient. Such a hope seems likely to be ill-founded,
given my experience with dir.c-related testcases in the last few months:
Examples where the documentation was hard to parse or even just wrong:
* 3aca58045f4f (git-clean.txt: do not claim we will delete files with
-n/--dry-run, 2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
Examples where testcases were declared wrong and changed:
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* a2b13367fe55 (Revert "dir.c: make 'git-status --ignored' work within
leading directories", 2019-12-10)
Examples where testcases were clearly inadequate:
* 502c386ff944 (t7300-clean: demonstrate deleting nested repo with an
ignored file breakage, 2019-08-25)
* 7541cc530239 (t7300: add testcases showing failure to clean specified
pathspecs, 2019-09-17)
* a5e916c7453b (dir: fix off-by-one error in match_pathspec_item,
2019-09-17)
* 404ebceda01c (dir: also check directories for matching pathspecs,
2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* 452efd11fbf6 (t3011: demonstrate directory traversal failures,
2019-12-10)
* b9670c1f5e6b (dir: fix checks on common prefix directory, 2019-12-19)
Examples where "correct behavior" was unclear to everyone:
https://lore.kernel.org/git/20190905154735.29784-1-newren@gmail.com/
Other commits of note:
* 902b90cf42bc (clean: fix theoretical path corruption, 2019-09-17)
However, on the positive side, it does make the code much faster. For
the following simple shell loop in an empty repository:
for depth in $(seq 10 25)
do
dirs=$(for i in $(seq 1 $depth) ; do printf 'dir/' ; done)
rm -rf dir
mkdir -p $dirs
>$dirs/untracked-file
/usr/bin/time --format="$depth: %e" git status --ignored >/dev/null
done
I saw the following timings, in seconds (note that the numbers are a
little noisy from run-to-run, but the trend is very clear with every
run):
10: 0.03
11: 0.05
12: 0.08
13: 0.19
14: 0.29
15: 0.50
16: 1.05
17: 2.11
18: 4.11
19: 8.60
20: 17.55
21: 33.87
22: 68.71
23: 140.05
24: 274.45
25: 551.15
For the above run, using strace I can look for the number of untracked
directories opened and can verify that it matches the expected
2^($depth+1)-2 (the sum of 2^1 + 2^2 + 2^3 + ... + 2^$depth).
After this fix, with strace I can verify that the number of untracked
directories that are opened drops to just $depth, and the timings all
drop to 0.00. In fact, it isn't until a depth of 190 nested directories
that it sometimes starts reporting a time of 0.01 seconds and doesn't
consistently report 0.01 seconds until there are 240 nested directories.
The previous code would have taken
17.55 * 2^220 / (60*60*24*365) = 9.4 * 10^59 YEARS
to have completed the 240 nested directories case. It's not often
that you get to speed something up by a factor of 3*10^69.
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-01 04:17:42 +00:00
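The exponential blow-up is easy to reproduce in isolation. The toy program below (plain C with made-up function names, not dir.c itself) counts directory reads for a chain of nested untracked directories: reading each level twice, as the pre-fix code did, costs 2^1 + 2^2 + ... + 2^depth = 2^(depth+1) - 2 reads, while reading each level once costs exactly depth reads:

#include <stdio.h>

static unsigned long reads;

/*
 * Model of the pre-fix behaviour: an untracked directory at `level`
 * (1-based; the repository root is level 0 and not counted) ends up
 * being read twice by its parent -- once from treat_directory() to
 * classify it and once from read_directory_recursive() to recurse.
 */
static void enter_old(int level, int depth)
{
	if (level > depth)
		return;
	reads++;			/* one opendir()/readdir() pass */
	enter_old(level + 1, depth);	/* read from treat_directory() */
	enter_old(level + 1, depth);	/* read again when recursing */
}

/* post-fix behaviour: each level is read exactly once */
static void enter_new(int level, int depth)
{
	if (level > depth)
		return;
	reads++;
	enter_new(level + 1, depth);
}

int main(void)
{
	int depth = 20;

	/* the (uncounted) repository root also issues the double call on level 1 */
	reads = 0; enter_old(1, depth); enter_old(1, depth);
	printf("old: %lu reads\n", reads);	/* 2^21 - 2 = 2097150 */

	reads = 0; enter_new(1, depth);
	printf("new: %lu reads\n", reads);	/* 20 */
	return 0;
}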
|
|
|
if (state == path_recurse) {
|
untracked cache: record .gitignore information and dir hierarchy
The idea is that if we can capture all input and (non-recursive) output of
read_directory_recursive(), and can verify later that all the input is
the same, then the second r_d_r() should produce the same output as in
the first run.
The requirement for this to work is stat info of a directory MUST
change if an entry is added to or removed from that directory (and
should not change often otherwise). If your OS and filesystem do not
meet this requirement, untracked cache is not for you. Most file
systems on *nix should be fine. On Windows, NTFS is fine while FAT may
not be [1] even though FAT on Linux seems to be fine.
The list of input of r_d_r() is in the big comment block in dir.h. In
short, the output of a directory (not counting subdirs) mainly depends
on stat info of the directory in question, all .gitignore leading to
it and the check_only flag when r_d_r() is called recursively. This
patch records all this info (and the output) as r_d_r() runs.
Two hash_sha1_file() calls are required for $GIT_DIR/info/exclude and
core.excludesfile unless their stat data matches. hash_sha1_file() is
only needed when .gitignore files in the worktree are modified,
otherwise their SHA-1 in index is used (see the previous patch).
We could store stat data for .gitignore files so we don't have to
rehash them if their content is different from index, but I think
.gitignore files are rarely modified, so not worth extra cache data
(and hashing penalty read-cache.c:verify_hdr(), as we will be storing
this as an index extension).
The implication is, if you change .gitignore, you better add it to the
index soon or you lose all the benefit of untracked cache because a
modified .gitignore invalidates all subdirs recursively. This is
especially bad for .gitignore at root.
This cached output is about untracked files only, not ignored files
because the number of tracked files is usually small, so small cache
overhead, while the number of ignored files could go really high
(e.g. *.o files mixing with source code).
[1] "Description of NTFS date and time stamps for files and folders"
http://support.microsoft.com/kb/299648
Helped-by: Torsten Bögershausen <tboegi@web.de>
Helped-by: David Turner <dturner@twopensource.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:25 +00:00
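The filesystem requirement stated above can be probed directly. The rough standalone test below (POSIX C; the probe path is arbitrary and the program is only an illustration, not part of Git) creates an entry inside a directory and reports whether the directory's own mtime changed, which is exactly the property the untracked cache relies on:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	const char *dir = "/tmp/untracked-cache-probe";	/* arbitrary probe path */
	struct stat before, after;
	int fd;

	if (mkdir(dir, 0700) < 0 && errno != EEXIST) {
		perror("mkdir");
		return 1;
	}
	if (stat(dir, &before) < 0) {
		perror("stat");
		return 1;
	}

	sleep(1);	/* defeat coarse timestamp granularity on some filesystems */

	fd = open("/tmp/untracked-cache-probe/newfile", O_CREAT | O_WRONLY, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	close(fd);

	if (stat(dir, &after) < 0) {
		perror("stat");
		return 1;
	}

	printf("directory mtime %s when an entry was added\n",
	       before.st_mtime == after.st_mtime ? "did NOT change" : "changed");
	return 0;
}

If the probe reports "did NOT change", the untracked cache cannot safely be used on that filesystem.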
|
|
|
struct untracked_cache_dir *ud;
|
2023-02-27 15:28:10 +00:00
|
|
|
ud = lookup_untracked(dir->untracked,
|
|
|
|
untracked,
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 10:12:25 +00:00
|
|
|
path.buf + baselen,
|
|
|
|
path.len - baselen);
|
|
|
|
subdir_state =
|
2017-05-05 19:53:32 +00:00
|
|
|
read_directory_recursive(dir, istate, path.buf,
|
2017-01-04 18:03:57 +00:00
|
|
|
path.len, ud,
|
2017-09-18 17:24:33 +00:00
|
|
|
check_only, stop_at_first_file, pathspec);
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-15 19:14:22 +00:00
|
|
|
if (subdir_state > dir_state)
|
|
|
|
dir_state = subdir_state;
|
dir: also check directories for matching pathspecs
Even if a directory doesn't match a pathspec, it is possible, depending
on the precise pathspecs, that some file underneath it might. So we
special case and recurse into the directory for such situations. However,
we previously always added any untracked directory that we recursed into
to the list of untracked paths, regardless of whether the directory
itself matched the pathspec.
For the case of git-clean and a set of pathspecs of "dir/file" and "more",
this caused a problem because we'd end up with dir entries for both of
"dir"
"dir/file"
Then correct_untracked_entries() would try to helpfully prune duplicates
for us by removing "dir/file" since it's under "dir", leaving us with
"dir"
Since the original pathspec only had "dir/file", the only entry left
doesn't match and leaves nothing to be removed. (Note that if only one
pathspec was specified, e.g. only "dir/file", then the common_prefix_len
optimizations in fill_directory would cause us to bypass this problem,
making it appear in simple tests that we could correctly remove manually
specified pathspecs.)
Fix this by actually checking whether the directory we are about to add
to the list of dir entries actually matches the pathspec; only do this
matching check after we have already returned from recursing into the
directory.
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-09-17 16:34:56 +00:00
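To see why adding the non-matching parent directory is harmful, here is a stripped-down model of the duplicate pruning described above (a hypothetical helper standing in for correct_untracked_entries(), not the real implementation): an entry is discarded when another collected entry names a directory that lexically contains it, so "dir/file" disappears and only the non-matching "dir/" survives:

#include <stdio.h>
#include <string.h>

/* does `dir` (which must end in '/') lexically contain `path`? */
static int contains(const char *dir, const char *path)
{
	size_t len = strlen(dir);

	return len < strlen(path) &&
	       dir[len - 1] == '/' &&
	       !memcmp(dir, path, len);
}

int main(void)
{
	/* entries collected for `git clean dir/file more` before the fix */
	const char *entries[] = { "dir/", "dir/file" };
	size_t n = sizeof(entries) / sizeof(*entries), i, j;

	for (i = 0; i < n; i++) {
		int pruned = 0;

		for (j = 0; j < n; j++)
			if (i != j && contains(entries[j], entries[i]))
				pruned = 1;
		if (!pruned)
			printf("kept: %s\n", entries[i]);
	}
	/* only "dir/" survives, and "dir/" does not match the pathspec "dir/file" */
	return 0;
}

The containment test has the same shape as the check_dir_entry_contains() helper shown later in this listing.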
|
|
|
|
dir: special case check for the possibility that pathspec is NULL
Commits 404ebceda01c ("dir: also check directories for matching
pathspecs", 2019-09-17) and 89a1f4aaf765 ("dir: if our pathspec might
match files under a dir, recurse into it", 2019-09-17) added calls to
match_pathspec() and do_match_pathspec() passing along their pathspec
parameter. Both match_pathspec() and do_match_pathspec() assume the
pathspec argument they are given is non-NULL. It turns out that
unpack-trees.c's verify_clean_subdirectory() calls read_directory() with
pathspec == NULL, and it is possible on case insensitive filesystems for
that NULL to make it to these new calls to match_pathspec() and
do_match_pathspec(). Add appropriate checks on the NULLness of pathspec
to avoid a segfault.
In case the negation throws anyone off (one of the calls was to
do_match_pathspec() while the other was to !match_pathspec(), yet no
negation of the NULLness of pathspec is used), there are two ways to
understand the differences:
* The code already handled the pathspec == NULL cases before this
series, and this series only tried to change behavior when there was
a pathspec, thus we only want to go into the if-block if pathspec is
non-NULL.
* One of the calls is for whether to recurse into a subdirectory, the
other is for after we've recursed into it for whether we want to
remove the subdirectory itself (i.e. the subdirectory didn't match
but something under it could have). That difference in situation
leads to the slight differences in logic used (well, that and the
slightly unusual fact that we don't want empty pathspecs to remove
untracked directories by default).
Denton found and analyzed one issue and provided the patch for the
match_pathspec() call, SZEDER figured out why the issue only reproduced
for some folks and not others and provided the testcase, and I looked
through the remainder of the series and noted the do_match_pathspec()
call that should have the same check.
Co-authored-by: Denton Liu <liu.denton@gmail.com>
Co-authored-by: SZEDER Gábor <szeder.dev@gmail.com>
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-10-01 18:55:24 +00:00
|
|
|
if (pathspec &&
|
|
|
|
!match_pathspec(istate, pathspec, path.buf, path.len,
|
dir: also check directories for matching pathspecs
2019-09-17 16:34:56 +00:00
|
|
|
0 /* prefix */, NULL,
|
|
|
|
0 /* do NOT special case dirs */))
|
|
|
|
state = path_none;
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-15 19:14:22 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (check_only) {
|
2017-09-18 17:24:33 +00:00
|
|
|
if (stop_at_first_file) {
|
|
|
|
/*
|
|
|
|
* If stopping at first file, then
|
|
|
|
* signal that a file was found by
|
|
|
|
* returning `path_excluded`. This is
|
|
|
|
* to return a consistent value
|
|
|
|
* regardless of whether an ignored or
|
|
|
|
* excluded file happened to be
|
|
|
|
* encountered 1st.
|
|
|
|
*
|
|
|
|
* In current usage, the
|
|
|
|
* `stop_at_first_file` is passed when
|
|
|
|
* an ancestor directory has matched
|
|
|
|
* an exclude pattern, so any found
|
|
|
|
* files will be excluded.
|
|
|
|
*/
|
|
|
|
if (dir_state >= path_excluded) {
|
|
|
|
dir_state = path_excluded;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-15 19:14:22 +00:00
|
|
|
/* abort early if maximum state has been reached */
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 10:12:25 +00:00
|
|
|
if (dir_state == path_untracked) {
|
untracked cache: record/validate dir mtime and reuse cached output
The main readdir loop in read_directory_recursive() is replaced with a
new one that checks whether the cached results of a directory are still valid.
If a file is added or removed from the index, the containing directory
is invalidated (but not its subdirs). If a directory's mtime changes,
the same happens. If a .gitignore is updated, the containing directory
and all subdirs are invalidated recursively. If dir_struct#flags or
other conditions change, the cache is ignored.
If a directory is invalidated, we opendir/readdir/closedir and run the
exclude machinery on that directory listing as usual. If untracked
cache is also enabled, we'll update the cache along the way. If a
directory is validated, we simply pull the untracked listing out from
the cache. The cache also records the list of direct subdirs that we
have to recurse in. Fully excluded directories are seen as "untracked
files".
In the best case when no dirs are invalidated, read_directory()
becomes a series of
stat(dir), open(.gitignore), fstat(), read(), close() and optionally
hash_sha1_file()
For comparison, standard read_directory() is a sequence of
opendir(), readdir(), open(.gitignore), fstat(), read(), close(), the
expensive last_exclude_matching() and closedir().
We already try not to open(.gitignore) if we know it does not exist,
so open/fstat/read/close sequence does not apply to every
directory. The sequence could be reduced further, as noted in
prep_exclude() in another patch. So in theory, the entire best-case
read_directory sequence could be reduced to a series of stat() and
nothing else.
This is not a silver bullet approach. When you compile a C file, for
example, the old .o file is removed and a new one with the same name
created, effectively invalidating the containing directory's cache
(but not its subdirectories). If your build process touches every
directory, this cache adds extra overhead for nothing, so it's a good
idea to separate generated files from tracked files. Editors may use
the same strategy for saving files. And of course you're out of luck
running your repo on an unsupported filesystem and/or operating system.
Helped-by: Eric Sunshine <sunshine@sunshineco.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-08 10:12:29 +00:00
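A condensed sketch of the validation step described above, with simplified types and made-up field names (the real data lives in untracked_cache_dir and related structures in dir.h): if the directory's recorded mtime still matches and nothing has invalidated the entry, the cached untracked listing is reused; otherwise the caller falls back to opendir()/readdir() and refreshes the cache.

#include <stdio.h>
#include <sys/stat.h>
#include <time.h>

/* simplified stand-in for the untracked-cache bookkeeping */
struct cached_dir_listing {
	time_t mtime;		/* directory mtime recorded on the previous run */
	int valid;		/* cleared when the index or a .gitignore changes */
	int nr_untracked;
	const char *untracked[16];
};

/*
 * Return 1 if the cached listing can be reused, 0 if the caller must fall
 * back to opendir()/readdir() and refresh the cache afterwards.
 */
static int can_reuse(const char *path, const struct cached_dir_listing *c)
{
	struct stat st;

	if (!c->valid)
		return 0;
	if (stat(path, &st) < 0)
		return 0;
	return st.st_mtime == c->mtime;	/* unchanged dir => same set of entries */
}

int main(void)
{
	struct cached_dir_listing cache = { 0, 0, 0, { 0 } };
	int i;

	if (can_reuse(".", &cache))
		for (i = 0; i < cache.nr_untracked; i++)
			puts(cache.untracked[i]);
	else
		puts("cache miss: re-read the directory and refresh the cache");
	return 0;
}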
|
|
|
if (cdir.fdir)
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 10:12:25 +00:00
|
|
|
add_untracked(untracked, path.buf + baselen);
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-15 19:14:22 +00:00
|
|
|
break;
|
untracked cache: record .gitignore information and dir hierarchy
2015-03-08 10:12:25 +00:00
|
|
|
}
|
2020-04-01 04:17:39 +00:00
|
|
|
/* skip the add_path_to_appropriate_result_list() */
|
2011-10-24 06:36:11 +00:00
|
|
|
continue;
|
2006-05-17 02:02:14 +00:00
|
|
|
}
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-15 19:14:22 +00:00
|
|
|
|
2019-12-10 20:00:24 +00:00
|
|
|
add_path_to_appropriate_result_list(dir, untracked, &cdir,
|
|
|
|
istate, &path, baselen,
|
|
|
|
pathspec, state);
|
2006-05-17 02:02:14 +00:00
|
|
|
}
|
2015-03-08 10:12:28 +00:00
|
|
|
close_cached_dir(&cdir);
|
2012-05-11 14:53:07 +00:00
|
|
|
out:
|
2012-05-08 16:43:40 +00:00
|
|
|
strbuf_release(&path);
|
2006-05-17 02:02:14 +00:00
|
|
|
|
dir.c: git-status --ignored: don't scan the work tree three times
2013-04-15 19:14:22 +00:00
|
|
|
return dir_state;
|
2006-05-17 02:02:14 +00:00
|
|
|
}
|
|
|
|
|
2017-05-18 08:21:53 +00:00
|
|
|
int cmp_dir_entry(const void *p1, const void *p2)
|
2006-05-17 02:02:14 +00:00
|
|
|
{
|
|
|
|
const struct dir_entry *e1 = *(const struct dir_entry **)p1;
|
|
|
|
const struct dir_entry *e2 = *(const struct dir_entry **)p2;
|
|
|
|
|
2014-06-20 02:06:44 +00:00
|
|
|
return name_compare(e1->name, e1->len, e2->name, e2->len);
|
2006-05-17 02:02:14 +00:00
|
|
|
}
|
|
|
|
|
2017-05-18 08:21:52 +00:00
|
|
|
/* check if *out lexically strictly contains *in */
|
2017-05-18 08:21:53 +00:00
|
|
|
int check_dir_entry_contains(const struct dir_entry *out, const struct dir_entry *in)
|
2017-05-18 08:21:52 +00:00
|
|
|
{
|
|
|
|
return (out->len < in->len) &&
|
|
|
|
(out->name[out->len - 1] == '/') &&
|
|
|
|
!memcmp(out->name, in->name, out->len);
|
|
|
|
}
|
|
|
|
|
2010-01-09 07:05:41 +00:00
|
|
|
static int treat_leading_path(struct dir_struct *dir,
|
2017-05-05 19:53:32 +00:00
|
|
|
struct index_state *istate,
|
2010-01-09 07:05:41 +00:00
|
|
|
const char *path, int len,
|
2017-01-04 18:03:57 +00:00
|
|
|
const struct pathspec *pathspec)
|
2010-01-09 07:05:41 +00:00
|
|
|
{
|
2012-05-01 11:25:24 +00:00
|
|
|
struct strbuf sb = STRBUF_INIT;
|
2020-01-16 20:21:55 +00:00
|
|
|
struct strbuf subdir = STRBUF_INIT;
|
dir: fix checks on common prefix directory
Many years ago, the directory traversing logic had an optimization that
would always recurse into any directory that was a common prefix of all
the pathspecs without walking the leading directories to get down to
the desired directory. Thus,
git ls-files -o .git/ # case A
would notice that .git/ was a common prefix of all pathspecs (since
it is the only pathspec listed), and then traverse into it and start
showing unknown files under that directory. Unfortunately, .git/ is not
a directory we should be traversing into, which made this optimization
problematic. This also affected cases like
git ls-files -o --exclude-standard t/ # case B
where t/ was in the .gitignore file and thus isn't interesting and
shouldn't be recursed into. It also affected cases like
git ls-files -o --directory untracked_dir/ # case C
where untracked_dir/ is indeed untracked and thus interesting, but the
--directory flag means we only want to show the directory itself, not
recurse into it and start listing untracked files below it.
The case B class of bugs were noted and fixed in commits 16e2cfa90993
("read_directory(): further split treat_path()", 2010-01-08) and
48ffef966c76 ("ls-files: fix overeager pathspec optimization",
2010-01-08), with the idea being that we first wanted to check whether
the common prefix was interesting. The former patch noted that
treat_path() couldn't be used when checking the common prefix because
treat_path() requires a dir_entry() and we haven't read any directories
at the point we are checking the common prefix. So, that patch split
treat_one_path() out of treat_path(). The latter patch then created a
new treat_leading_path() which duplicated by hand the bits of
treat_path() that couldn't be broken out and then called
treat_one_path() for the remainder. There were three problems with this
approach:
* The duplicated logic in treat_leading_path() accidentally missed the
check for special paths (such as is_dot_or_dotdot and matching
".git"), causing case A types of bugs to continue to be an issue.
* The treat_leading_path() logic assumed we should traverse into
anything where path_treatment was not path_none, i.e. it perpetuated
class C types of bugs.
* It meant we had split logic that needed to be kept in sync, running the
risk that people introduced new inconsistencies (such as in commit
be8a84c52669, which we reverted earlier in this series, or in commit
df5bcdf83ae which we'll fix in a subsequent commit)
Fix most of these problems by making treat_leading_path() not only loop
over each leading path component, but also call treat_path() directly on
each. To do so, we have to create a synthetic dir_entry, but that only
takes a few lines. Then, pay attention to the path_treatment result we
get from treat_path() and don't treat path_excluded, path_untracked, and
path_recurse all the same as path_recurse.
This leaves one remaining problem, the new inconsistency from commit
df5bcdf83ae. That will be addressed in a subsequent commit.
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2019-12-19 21:28:24 +00:00
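The per-component walk that this fix introduces is essentially the prevlen/baselen loop that follows below. A standalone illustration of that slicing (plain C, independent of dir.c) prints each leading prefix of a path, which is what would be handed to treat_path() one component at a time:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *path = "foo/bar/baz/";
	int len = (int)strlen(path);
	int prevlen = 0, baselen = 0;

	while (len && path[len - 1] == '/')
		len--;				/* drop trailing slashes */

	while (baselen < len) {
		const char *cp;

		prevlen = baselen + !!baselen;	/* skip the '/' after the previous component */
		cp = memchr(path + prevlen, '/', len - prevlen);
		baselen = cp ? (int)(cp - path) : len;

		/* prints "foo", then "foo/bar", then "foo/bar/baz" */
		printf("%.*s\n", baselen, path);
	}
	return 0;
}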
|
|
|
int prevlen, baselen;
|
2010-01-09 07:05:41 +00:00
|
|
|
const char *cp;
|
dir: fix checks on common prefix directory
2019-12-19 21:28:24 +00:00
|
|
|
struct cached_dir cdir;
|
|
|
|
enum path_treatment state = path_none;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For each directory component of path, we are going to check whether
|
|
|
|
* that path is relevant given the pathspec. For example, if path is
|
|
|
|
* foo/bar/baz/
|
|
|
|
* then we will ask treat_path() whether we should go into foo, then
|
|
|
|
* whether we should go into bar, then whether baz is relevant.
|
|
|
|
* Checking each is important because e.g. if path is
|
|
|
|
* .git/info/
|
|
|
|
* then we need to check .git to know we shouldn't traverse it.
|
|
|
|
* If the return from treat_path() is:
|
|
|
|
* * path_none, for any path, we return false.
|
|
|
|
* * path_recurse, for all path components, we return true
|
|
|
|
* * <anything else> for some intermediate component, we make sure
|
|
|
|
* to add that path to the relevant list but return false
|
|
|
|
* signifying that we shouldn't recurse into it.
|
|
|
|
*/
|
2010-01-09 07:05:41 +00:00
|
|
|
|
|
|
|
while (len && path[len - 1] == '/')
|
|
|
|
len--;
|
|
|
|
if (!len)
|
|
|
|
return 1;
|
dir: fix checks on common prefix directory
2019-12-19 21:28:24 +00:00
|
|
|
|
|
|
|
memset(&cdir, 0, sizeof(cdir));
|
2020-01-16 20:21:55 +00:00
|
|
|
cdir.d_type = DT_DIR;
|
2010-01-09 07:05:41 +00:00
|
|
|
baselen = 0;
|
dir: fix checks on common prefix directory
2019-12-19 21:28:24 +00:00
|
|
|
prevlen = 0;
|
2010-01-09 07:05:41 +00:00
|
|
|
while (1) {
|
dir: fix checks on common prefix directory
2019-12-19 21:28:24 +00:00
|
|
|
prevlen = baselen + !!baselen;
|
|
|
|
cp = path + prevlen;
|
2010-01-09 07:05:41 +00:00
|
|
|
cp = memchr(cp, '/', path + len - cp);
|
|
|
|
if (!cp)
|
|
|
|
baselen = len;
|
|
|
|
else
|
|
|
|
baselen = cp - path;
|
2019-12-19 21:28:24 +00:00
|
|
|
strbuf_reset(&sb);
|
2012-05-01 11:25:24 +00:00
|
|
|
strbuf_add(&sb, path, baselen);
|
|
|
|
if (!is_directory(sb.buf))
|
|
|
|
break;
|
2019-12-19 21:28:24 +00:00
|
|
|
strbuf_reset(&sb);
|
|
|
|
strbuf_add(&sb, path, prevlen);
|
2020-01-16 20:21:55 +00:00
|
|
|
strbuf_reset(&subdir);
|
|
|
|
strbuf_add(&subdir, path+prevlen, baselen-prevlen);
|
|
|
|
cdir.d_name = subdir.buf;
|
dir: replace exponential algorithm with a linear one
dir's read_directory_recursive() naturally operates recursively in order
to walk the directory tree. The treatment of directories is sometimes weird
because there are so many different permutations of how to handle
directories. Some examples:
* 'git ls-files -o --directory' only needs to know that a directory
itself is untracked; it doesn't need to recurse into it to see what
is underneath.
* 'git status' needs to recurse into an untracked directory, but only
to determine whether or not it is empty. If there are no files
underneath, the directory itself will be omitted from the output.
If it is not empty, only the directory will be listed.
* 'git status --ignored' needs to recurse into untracked directories
and report all the ignored entries and then report the directory as
untracked -- UNLESS all the entries under the directory are
ignored, in which case we don't print any of the entries under the
directory and just report the directory itself as ignored. (Note
that although this forces us to walk all untracked files underneath
the directory as well, we strip them from the output, except for
users like 'git clean' who also set DIR_KEEP_UNTRACKED_CONTENTS.)
* For 'git clean', we may need to recurse into a directory that
doesn't match any specified pathspecs, if it's possible that there
is an entry underneath the directory that can match one of the
pathspecs. In such a case, we need to be careful to omit the
directory itself from the list of paths (see commit 404ebceda01c
("dir: also check directories for matching pathspecs", 2019-09-17))
Part of the tension noted above is that the treatment of a directory can
change based on the files within it, and based on the various settings
in dir->flags. Trying to keep this in mind while reading over the code,
it is easy to think in terms of "treat_directory() tells us what to do
with a directory, and read_directory_recursive() is the thing that
recurses". Since we need to look into a directory to know how to treat
it, though, it is quite easy to decide to (also) recurse into the
directory from treat_directory() by adding a read_directory_recursive()
call. Adding such a call is actually fine, IF we make sure that
read_directory_recursive() does not also recurse into that same
directory.
Unfortunately, commit df5bcdf83aeb ("dir: recurse into untracked dirs
for ignored files", 2017-05-18), added exactly such a case to the code,
meaning we'd have two calls to read_directory_recursive() for an
untracked directory. So, if we had a file named
one/two/three/four/five/somefile.txt
and nothing in one/ was tracked, then 'git status --ignored' would
call read_directory_recursive() twice on the directory 'one/', and
each of those would call read_directory_recursive() twice on the
directory 'one/two/', and so on until read_directory_recursive() was
called 2^5 times for 'one/two/three/four/five/'.
Avoid calling read_directory_recursive() twice per level by moving a
lot of the special logic into treat_directory().
Since dir.c is somewhat complex, extra cruft built up around this over
time. While trying to unravel it, I noticed several instances where the
first call to read_directory_recursive() would return e.g.
path_untracked for some directory and a later one would return e.g.
path_none, despite the fact that the directory clearly should have been
considered untracked. The code happened to work due to the side-effect
from the first invocation of adding untracked entries to dir->entries;
this allowed it to get the correct output despite the supposed override
in return value by the later call.
I am somewhat concerned that there are still bugs and maybe even
testcases with the wrong expectation. I have tried to carefully
document treat_directory() since it becomes more complex after this
change (though much of this complexity came from elsewhere that probably
deserved better comments to begin with). However, much of my work felt
more like a game of whackamole while attempting to make the code match
the existing regression tests than an attempt to create an
implementation that matched some clear design. That seems wrong to me,
but the rules of existing behavior had so many special cases that I had
a hard time coming up with some overarching rules about what correct
behavior is for all cases, forcing me to hope that the regression tests
are correct and sufficient. Such a hope seems likely to be ill-founded,
given my experience with dir.c-related testcases in the last few months:
Examples where the documentation was hard to parse or even just wrong:
* 3aca58045f4f (git-clean.txt: do not claim we will delete files with
-n/--dry-run, 2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
Examples where testcases were declared wrong and changed:
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* a2b13367fe55 (Revert "dir.c: make 'git-status --ignored' work within
leading directories", 2019-12-10)
Examples where testcases were clearly inadequate:
* 502c386ff944 (t7300-clean: demonstrate deleting nested repo with an
ignored file breakage, 2019-08-25)
* 7541cc530239 (t7300: add testcases showing failure to clean specified
pathspecs, 2019-09-17)
* a5e916c7453b (dir: fix off-by-one error in match_pathspec_item,
2019-09-17)
* 404ebceda01c (dir: also check directories for matching pathspecs,
2019-09-17)
* 09487f2cbad3 (clean: avoid removing untracked files in a nested git
repository, 2019-09-17)
* e86bbcf987fa (clean: disambiguate the definition of -d, 2019-09-17)
* 452efd11fbf6 (t3011: demonstrate directory traversal failures,
2019-12-10)
* b9670c1f5e6b (dir: fix checks on common prefix directory, 2019-12-19)
Examples where "correct behavior" was unclear to everyone:
https://lore.kernel.org/git/20190905154735.29784-1-newren@gmail.com/
Other commits of note:
* 902b90cf42bc (clean: fix theoretical path corruption, 2019-09-17)
However, on the positive side, it does make the code much faster. For
the following simple shell loop in an empty repository:
for depth in $(seq 10 25)
do
dirs=$(for i in $(seq 1 $depth) ; do printf 'dir/' ; done)
rm -rf dir
mkdir -p $dirs
>$dirs/untracked-file
/usr/bin/time --format="$depth: %e" git status --ignored >/dev/null
done
I saw the following timings, in seconds (note that the numbers are a
little noisy from run-to-run, but the trend is very clear with every
run):
10: 0.03
11: 0.05
12: 0.08
13: 0.19
14: 0.29
15: 0.50
16: 1.05
17: 2.11
18: 4.11
19: 8.60
20: 17.55
21: 33.87
22: 68.71
23: 140.05
24: 274.45
25: 551.15
For the above run, using strace I can look for the number of untracked
directories opened and can verify that it matches the expected
2^($depth+1)-2 (the sum of 2^1 + 2^2 + 2^3 + ... + 2^$depth).
After this fix, with strace I can verify that the number of untracked
directories that are opened drops to just $depth, and the timings all
drop to 0.00. In fact, it isn't until a depth of 190 nested directories
that it sometimes starts reporting a time of 0.01 seconds and doesn't
consistently report 0.01 seconds until there are 240 nested directories.
The previous code would have taken
17.55 * 2^220 / (60*60*24*365) = 9.4 * 10^59 YEARS
to have completed the 240 nested directories case. It's not often
that you get to speed something up by a factor of 3*10^69.
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-04-01 04:17:42 +00:00
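The doubling described above is easy to reproduce outside of git. The following standalone program (illustrative only; neither function exists in dir.c) counts directory visits under the old behavior, where each untracked directory was descended into twice, versus the fixed behavior with a single descent; the old count matches the 2^(depth+1)-2 formula quoted above, while the new count is just the depth.

#include <stdio.h>

static unsigned long long visits;

/* Old behavior: the child directory is entered twice per level. */
static void old_scan(int depth)
{
	if (!depth)
		return;
	visits++;		/* one opendir() for this directory */
	old_scan(depth - 1);	/* descent from read_directory_recursive() */
	old_scan(depth - 1);	/* extra descent added by df5bcdf83aeb */
}

/* New behavior: each untracked directory is entered exactly once. */
static void new_scan(int depth)
{
	if (!depth)
		return;
	visits++;
	new_scan(depth - 1);
}

int main(void)
{
	for (int d = 10; d <= 25; d++) {
		unsigned long long old_visits;

		visits = 0;
		old_scan(d);	/* first descent into "one/" */
		old_scan(d);	/* second descent into "one/" */
		old_visits = visits;

		visits = 0;
		new_scan(d);

		printf("depth %2d: old=%llu (2^(d+1)-2=%llu), new=%llu\n",
		       d, old_visits, (2ULL << d) - 2, visits);
	}
	return 0;
}

The same growth rate also explains the year figure above: extrapolating the measured 17.55 seconds at depth 20 by another factor of 2^220 gives roughly 3 * 10^67 seconds, which divided by the number of seconds in a year is about 9.4 * 10^59 years.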
|
|
|
state = treat_path(dir, NULL, &cdir, istate, &sb, prevlen, pathspec);
|
2019-12-19 21:28:25 +00:00
|
|
|
|
2019-12-19 21:28:24 +00:00
|
|
|
if (state != path_recurse)
|
2012-05-01 11:25:24 +00:00
|
|
|
break; /* do not recurse into it */
|
2019-12-19 21:28:24 +00:00
|
|
|
if (len <= baselen)
|
2012-05-01 11:25:24 +00:00
|
|
|
break; /* finished checking */
|
2010-01-09 07:05:41 +00:00
|
|
|
}
|
2019-12-19 21:28:24 +00:00
|
|
|
add_path_to_appropriate_result_list(dir, NULL, &cdir, istate,
|
|
|
|
&sb, baselen, pathspec,
|
|
|
|
state);
|
|
|
|
|
2020-01-16 20:21:55 +00:00
|
|
|
strbuf_release(&subdir);
|
2012-05-01 11:25:24 +00:00
|
|
|
strbuf_release(&sb);
|
2019-12-19 21:28:24 +00:00
|
|
|
return state == path_recurse;
|
2010-01-09 07:05:41 +00:00
|
|
|
}
|
|
|
|
|
2015-03-08 10:12:46 +00:00
|
|
|
static const char *get_ident_string(void)
|
|
|
|
{
|
|
|
|
static struct strbuf sb = STRBUF_INIT;
|
|
|
|
struct utsname uts;
|
|
|
|
|
|
|
|
if (sb.len)
|
|
|
|
return sb.buf;
|
2015-07-17 17:09:41 +00:00
|
|
|
if (uname(&uts) < 0)
|
2015-03-08 10:12:46 +00:00
|
|
|
die_errno(_("failed to get kernel name and information"));
|
2016-01-24 15:28:21 +00:00
|
|
|
strbuf_addf(&sb, "Location %s, system %s", get_git_work_tree(),
|
|
|
|
uts.sysname);
|
2015-03-08 10:12:46 +00:00
|
|
|
return sb.buf;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ident_in_untracked(const struct untracked_cache *uc)
|
|
|
|
{
|
2016-01-24 15:28:21 +00:00
|
|
|
/*
|
|
|
|
* Previous git versions may have saved many NUL separated
|
|
|
|
* strings in the "ident" field, but it is insane to manage
|
|
|
|
* many locations, so just take care of the first one.
|
|
|
|
*/
|
2015-03-08 10:12:46 +00:00
|
|
|
|
2016-01-24 15:28:21 +00:00
|
|
|
return !strcmp(uc->ident.buf, get_ident_string());
|
2015-03-08 10:12:46 +00:00
|
|
|
}
|
|
|
|
|
2016-01-24 15:28:21 +00:00
|
|
|
static void set_untracked_ident(struct untracked_cache *uc)
|
2015-03-08 10:12:46 +00:00
|
|
|
{
|
2016-01-24 15:28:21 +00:00
|
|
|
strbuf_reset(&uc->ident);
|
2015-03-08 10:12:46 +00:00
|
|
|
strbuf_addstr(&uc->ident, get_ident_string());
|
2016-01-24 15:28:21 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* This strbuf used to contain a list of NUL separated
|
|
|
|
* strings, so save NUL too for backward compatibility.
|
|
|
|
*/
|
2015-03-08 10:12:46 +00:00
|
|
|
strbuf_addch(&uc->ident, 0);
|
|
|
|
}
|
|
|
|
|
untracked-cache: support '--untracked-files=all' if configured
Untracked cache was originally designed to only work with
"--untracked-files=normal", and is bypassed when
"--untracked-files=all" is requested, but this causes performance
issues for UI tooling that wants to see "all" on a frequent basis.
On the other hand, the conditions that altogether prevented
applicability to the "all" mode no longer seem to apply, after
several major refactors in recent years; this possibility was
discussed in
81153d02-8e7a-be59-e709-e90cd5906f3a@jeffhostetler.com and
CABPp-BFiwzzUgiTj_zu+vF5x20L0=1cf25cHwk7KZQj2YkVzXw@mail.gmail.com,
and somewhat confirmed experimentally by several users using a
version of this patch to use untracked cache with -uall for about a
year.
When 'git status' runs without using the untracked cache, on a large
repo, on Windows, with fsmonitor, it can run very slowly. This can
make GUIs that need to use "-uall" (and therefore currently bypass the
untracked cache) unusable when fsmonitor is enabled, on such large
repos.
To partially address this, align the supported directory flags for the
stored untracked cache data with the git config. If a user specifies
an '--untracked-files=' commandline parameter that does not align with
their 'status.showuntrackedfiles' config value, then the untracked
cache will be ignored - as it is for other unsupported situations like
when a pathspec is specified.
If the previously stored flags no longer match the current
configuration, but the currently-applicable flags do match the current
configuration, then discard the previously stored untracked cache
data.
For most users there will be no change in behavior. Users who need
'--untracked-files=all' to perform well will now have the option of
setting "status.showuntrackedfiles" to "all" for better / more
consistent performance.
Users who need '--untracked-files=all' to perform well for their
tooling AND prefer to avoid the verbosity of "all" when running
git status explicitly without options... are out of luck for now (no
change).
Users who have the "status.showuntrackedfiles" config set to "all"
and yet frequently explicitly call
'git status --untracked-files=normal' (and use the untracked cache)
are the only ones who will be disadvantaged by this change. Their
"--untracked-files=normal" calls will, after this change, no longer
use the untracked cache.
Signed-off-by: Tao Klerks <tao@klerks.biz>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-03-31 16:02:15 +00:00
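A minimal standalone model of the rule described above (the flag bit values and helper names here are stand-ins for illustration; the real logic lives in new_untracked_cache_flags() and validate_untracked_cache() below): derive the flags implied by status.showuntrackedfiles, then compare them with what was stored in the cache and what the current run requested.

#include <stdio.h>
#include <string.h>

/* Illustrative bit values; the real flags come from dir.h. */
#define DIR_SHOW_OTHER_DIRECTORIES	(1 << 0)
#define DIR_HIDE_EMPTY_DIRECTORIES	(1 << 1)

/* Flags a fresh untracked cache would use, given the config value. */
static unsigned flags_for_config(const char *showuntrackedfiles)
{
	if (showuntrackedfiles && !strcmp(showuntrackedfiles, "all"))
		return 0;	/* "all": list every file, no directory collapsing */
	return DIR_SHOW_OTHER_DIRECTORIES | DIR_HIDE_EMPTY_DIRECTORIES;
}

/*
 * stored    = flags recorded in the existing cache extension
 * requested = flags this particular run wants to use
 * config    = current status.showuntrackedfiles value
 */
static const char *cache_decision(unsigned stored, unsigned requested,
				  const char *config)
{
	if (stored == requested)
		return "use the existing cache";
	if (stored != flags_for_config(config))
		return "discard the stored cache and rebuild with the requested flags";
	return "bypass the cache for this run only";
}

int main(void)
{
	const char *config = "all";
	unsigned stored = DIR_SHOW_OTHER_DIRECTORIES | DIR_HIDE_EMPTY_DIRECTORIES;
	unsigned requested = flags_for_config(config);	/* this run honors -uall */

	printf("%s\n", cache_decision(stored, requested, config));
	return 0;
}

In this example the config was switched to "all" while the cache still holds normal-mode data, so the stored cache is rebuilt; a run whose command-line option disagrees with the config instead falls into the bypass branch, matching the behavior described above.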
|
|
|
static unsigned new_untracked_cache_flags(struct index_state *istate)
|
|
|
|
{
|
|
|
|
struct repository *repo = istate->repo;
|
|
|
|
char *val;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This logic is coordinated with the setting of these flags in
|
|
|
|
* wt-status.c#wt_status_collect_untracked(), and the evaluation
|
|
|
|
* of the config setting in commit.c#git_status_config()
|
|
|
|
*/
|
|
|
|
if (!repo_config_get_string(repo, "status.showuntrackedfiles", &val) &&
|
|
|
|
!strcmp(val, "all"))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The default, if "all" is not set, is "normal" - leading us here.
|
|
|
|
* If the value is "none" then it really doesn't matter.
|
|
|
|
*/
|
|
|
|
return DIR_SHOW_OTHER_DIRECTORIES | DIR_HIDE_EMPTY_DIRECTORIES;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void new_untracked_cache(struct index_state *istate, int flags)
|
2016-01-24 15:28:19 +00:00
|
|
|
{
|
|
|
|
struct untracked_cache *uc = xcalloc(1, sizeof(*uc));
|
|
|
|
strbuf_init(&uc->ident, 100);
|
|
|
|
uc->exclude_per_dir = ".gitignore";
|
2022-03-31 16:02:15 +00:00
|
|
|
uc->dir_flags = flags >= 0 ? flags : new_untracked_cache_flags(istate);
|
2016-01-24 15:28:21 +00:00
|
|
|
set_untracked_ident(uc);
|
2016-01-24 15:28:19 +00:00
|
|
|
istate->untracked = uc;
|
2016-01-24 15:28:21 +00:00
|
|
|
istate->cache_changed |= UNTRACKED_CHANGED;
|
2016-01-24 15:28:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void add_untracked_cache(struct index_state *istate)
|
|
|
|
{
|
|
|
|
if (!istate->untracked) {
|
2022-03-31 16:02:15 +00:00
|
|
|
new_untracked_cache(istate, -1);
|
2016-01-24 15:28:21 +00:00
|
|
|
} else {
|
|
|
|
if (!ident_in_untracked(istate->untracked)) {
|
|
|
|
free_untracked_cache(istate->untracked);
|
2022-03-31 16:02:15 +00:00
|
|
|
new_untracked_cache(istate, -1);
|
2016-01-24 15:28:21 +00:00
|
|
|
}
|
|
|
|
}
|
2016-01-24 15:28:19 +00:00
|
|
|
}
|
|
|
|
|
2016-01-24 15:28:20 +00:00
|
|
|
void remove_untracked_cache(struct index_state *istate)
|
|
|
|
{
|
|
|
|
if (istate->untracked) {
|
|
|
|
free_untracked_cache(istate->untracked);
|
|
|
|
istate->untracked = NULL;
|
|
|
|
istate->cache_changed |= UNTRACKED_CHANGED;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-08 10:12:26 +00:00
|
|
|
static struct untracked_cache_dir *validate_untracked_cache(struct dir_struct *dir,
|
|
|
|
int base_len,
|
untracked-cache: write index when populating empty untracked cache
It is expected that an empty/unpopulated untracked cache structure can
be written to the index - by update-index, or by a "git status" call
that sees the untracked cache should be enabled and is not, but is
running with options that make the untracked cache non-applicable in
that run (e.g. a pathspec).
Currently, if that happens, then subsequent "git status" calls end up
populating the untracked cache, but not writing the index (not saving
their work) - so the performance outcome is almost identical to the
cache being altogether disabled.
This continues until the index gets written with the untracked cache
populated, for some *other* reason, such as a working tree change.
Detect the condition where an empty untracked cache exists in the
index and we will collect the list of untracked paths, and queue an
index write under that condition, so that the collected untracked
paths can be written out to the untracked cache extension in the
index.
This change depends on previous fixes to t7519 for the "ignore .git
changes when invalidating UNTR" test case to pass - before this fix,
the test never actually did anything as it was not set up correctly.
Signed-off-by: Tao Klerks <tao@klerks.biz>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-02-27 21:57:01 +00:00
|
|
|
const struct pathspec *pathspec,
|
|
|
|
struct index_state *istate)
|
2015-03-08 10:12:26 +00:00
|
|
|
{
|
|
|
|
struct untracked_cache_dir *root;
|
2018-02-28 21:21:09 +00:00
|
|
|
static int untracked_cache_disabled = -1;
|
2015-03-08 10:12:26 +00:00
|
|
|
|
2018-02-28 21:21:09 +00:00
|
|
|
if (!dir->untracked)
|
|
|
|
return NULL;
|
|
|
|
if (untracked_cache_disabled < 0)
|
|
|
|
untracked_cache_disabled = git_env_bool("GIT_DISABLE_UNTRACKED_CACHE", 0);
|
|
|
|
if (untracked_cache_disabled)
|
2015-03-08 10:12:26 +00:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We only support $GIT_DIR/info/exclude and core.excludesfile
|
|
|
|
* as the global ignore rule files. Any other additions
|
|
|
|
* (e.g. from command line) invalidate the cache. This
|
|
|
|
* condition also catches running setup_standard_excludes()
|
|
|
|
* before setting dir->untracked!
|
|
|
|
*/
|
2023-02-27 15:28:10 +00:00
|
|
|
if (dir->internal.unmanaged_exclude_files)
|
2015-03-08 10:12:26 +00:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Optimize for the main use case only: whole-tree git
|
|
|
|
* status. More work involved in treat_leading_path() if we
|
|
|
|
* use cache on just a subset of the worktree. pathspec
|
|
|
|
* support could make the matter even worse.
|
|
|
|
*/
|
|
|
|
if (base_len || (pathspec && pathspec->nr))
|
|
|
|
return NULL;
|
|
|
|
|
2022-03-31 16:02:15 +00:00
|
|
|
/* We don't support collecting ignore files */
|
|
|
|
if (dir->flags & (DIR_SHOW_IGNORED | DIR_SHOW_IGNORED_TOO |
|
|
|
|
DIR_COLLECT_IGNORED))
|
2015-03-08 10:12:26 +00:00
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If we use .gitignore in the cache and now you change it to
|
|
|
|
* .gitexclude, everything will go wrong.
|
|
|
|
*/
|
|
|
|
if (dir->exclude_per_dir != dir->untracked->exclude_per_dir &&
|
|
|
|
strcmp(dir->exclude_per_dir, dir->untracked->exclude_per_dir))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* EXC_CMDL is not considered in the cache. If people set it,
|
|
|
|
* skip the cache.
|
|
|
|
*/
|
2023-02-27 15:28:10 +00:00
|
|
|
if (dir->internal.exclude_list_group[EXC_CMDL].nr)
|
2015-03-08 10:12:26 +00:00
|
|
|
return NULL;
|
|
|
|
|
2015-03-08 10:12:46 +00:00
|
|
|
if (!ident_in_untracked(dir->untracked)) {
|
2018-07-21 07:49:19 +00:00
|
|
|
warning(_("untracked cache is disabled on this system or location"));
|
2015-03-08 10:12:46 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2022-03-31 16:02:15 +00:00
|
|
|
/*
|
|
|
|
* If the untracked structure we received does not have the same flags
|
|
|
|
* as requested in this run, we're going to need to either discard the
|
|
|
|
* existing structure (and potentially later recreate), or bypass the
|
|
|
|
* untracked cache mechanism for this run.
|
|
|
|
*/
|
|
|
|
if (dir->flags != dir->untracked->dir_flags) {
|
|
|
|
/*
|
|
|
|
* If the untracked structure we received does not have the same flags
|
|
|
|
* as configured, then we need to reset / create a new "untracked"
|
|
|
|
* structure to match the new config.
|
|
|
|
*
|
|
|
|
* Keeping the saved and used untracked cache consistent with the
|
|
|
|
* configuration provides an opportunity for frequent users of
|
|
|
|
* "git status -uall" to leverage the untracked cache by aligning their
|
|
|
|
* configuration - setting "status.showuntrackedfiles" to "all" or
|
|
|
|
* "normal" as appropriate.
|
|
|
|
*
|
|
|
|
* Previously using -uall (or setting "status.showuntrackedfiles" to
|
|
|
|
* "all") was incompatible with untracked cache and *consistently*
|
|
|
|
* caused surprisingly bad performance (with fscache and fsmonitor
|
|
|
|
* enabled) on Windows.
|
|
|
|
*
|
|
|
|
* IMPROVEMENT OPPORTUNITY: If we reworked the untracked cache storage
|
|
|
|
* to not be as bound up with the desired output in a given run,
|
|
|
|
* and instead iterated through and stored enough information to
|
|
|
|
* correctly serve both "modes", then users could get peak performance
|
|
|
|
* with or without '-uall' regardless of their
|
|
|
|
* "status.showuntrackedfiles" config.
|
|
|
|
*/
|
|
|
|
if (dir->untracked->dir_flags != new_untracked_cache_flags(istate)) {
|
|
|
|
free_untracked_cache(istate->untracked);
|
|
|
|
new_untracked_cache(istate, dir->flags);
|
|
|
|
dir->untracked = istate->untracked;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* Current untracked cache data is consistent with config, but not
|
|
|
|
* usable in this request/run; just bypass untracked cache.
|
|
|
|
*/
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-02-27 21:57:01 +00:00
|
|
|
if (!dir->untracked->root) {
|
|
|
|
/* Untracked cache existed but is not initialized; fix that */
|
2021-02-24 14:31:57 +00:00
|
|
|
FLEX_ALLOC_STR(dir->untracked->root, name, "");
|
2022-02-27 21:57:01 +00:00
|
|
|
istate->cache_changed |= UNTRACKED_CHANGED;
|
|
|
|
}
|
2015-03-08 10:12:26 +00:00
|
|
|
|
|
|
|
/* Validate $GIT_DIR/info/exclude and core.excludesfile */
|
|
|
|
root = dir->untracked->root;
|
2023-02-27 15:28:10 +00:00
|
|
|
if (!oideq(&dir->internal.ss_info_exclude.oid,
|
2018-01-28 00:13:12 +00:00
|
|
|
&dir->untracked->ss_info_exclude.oid)) {
|
2015-03-08 10:12:26 +00:00
|
|
|
invalidate_gitignore(dir->untracked, root);
|
2023-02-27 15:28:10 +00:00
|
|
|
dir->untracked->ss_info_exclude = dir->internal.ss_info_exclude;
|
2015-03-08 10:12:26 +00:00
|
|
|
}
|
2023-02-27 15:28:10 +00:00
|
|
|
if (!oideq(&dir->internal.ss_excludes_file.oid,
|
2018-01-28 00:13:12 +00:00
|
|
|
&dir->untracked->ss_excludes_file.oid)) {
|
2015-03-08 10:12:26 +00:00
|
|
|
invalidate_gitignore(dir->untracked, root);
|
2023-02-27 15:28:10 +00:00
|
|
|
dir->untracked->ss_excludes_file = dir->internal.ss_excludes_file;
|
2015-03-08 10:12:26 +00:00
|
|
|
}
|
2015-03-08 10:12:30 +00:00
|
|
|
|
|
|
|
/* Make sure this directory is not dropped out at saving phase */
|
|
|
|
root->recurse = 1;
|
2015-03-08 10:12:26 +00:00
|
|
|
return root;
|
|
|
|
}
|
|
|
|
|
2021-05-12 17:28:14 +00:00
|
|
|
static void emit_traversal_statistics(struct dir_struct *dir,
|
|
|
|
struct repository *repo,
|
|
|
|
const char *path,
|
|
|
|
int path_len)
|
|
|
|
{
|
|
|
|
if (!trace2_is_enabled())
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (!path_len) {
|
|
|
|
trace2_data_string("read_directory", repo, "path", "");
|
|
|
|
} else {
|
|
|
|
struct strbuf tmp = STRBUF_INIT;
|
|
|
|
strbuf_add(&tmp, path, path_len);
|
|
|
|
trace2_data_string("read_directory", repo, "path", tmp.buf);
|
|
|
|
strbuf_release(&tmp);
|
|
|
|
}
|
|
|
|
|
2021-05-12 17:28:15 +00:00
|
|
|
trace2_data_intmax("read_directory", repo,
|
2023-02-27 15:28:10 +00:00
|
|
|
"directories-visited", dir->internal.visited_directories);
|
2021-05-12 17:28:15 +00:00
|
|
|
trace2_data_intmax("read_directory", repo,
|
2023-02-27 15:28:10 +00:00
|
|
|
"paths-visited", dir->internal.visited_paths);
|
2021-05-12 17:28:15 +00:00
|
|
|
|
2021-05-12 17:28:14 +00:00
|
|
|
if (!dir->untracked)
|
|
|
|
return;
|
|
|
|
trace2_data_intmax("read_directory", repo,
|
|
|
|
"node-creation", dir->untracked->dir_created);
|
|
|
|
trace2_data_intmax("read_directory", repo,
|
|
|
|
"gitignore-invalidation",
|
|
|
|
dir->untracked->gitignore_invalidated);
|
|
|
|
trace2_data_intmax("read_directory", repo,
|
|
|
|
"directory-invalidation",
|
|
|
|
dir->untracked->dir_invalidated);
|
|
|
|
trace2_data_intmax("read_directory", repo,
|
|
|
|
"opendir", dir->untracked->dir_opened);
|
|
|
|
}
|
|
|
|
|
2017-05-05 19:53:33 +00:00
|
|
|
int read_directory(struct dir_struct *dir, struct index_state *istate,
|
|
|
|
const char *path, int len, const struct pathspec *pathspec)
|
Optimize directory listing with pathspec limiter.
The way things are set up, you can now pass a "pathspec" to the
"read_directory()" function. If you pass NULL, it acts exactly
like it used to do (read everything). If you pass a non-NULL
pointer, it will simplify it into a "these are the prefixes
without any special characters", and stop any readdir() early if
the path in question doesn't match any of the prefixes.
NOTE! This does *not* obviate the need for the caller to do the *exact*
pathspec match later. It's a first-level filter on "read_directory()", but
it does not do the full pathspec thing. Maybe it should. But in the
meantime, builtin-add.c really does need to do first
read_directory(dir, .., pathspec);
if (pathspec)
prune_directory(dir, pathspec, baselen);
ie the "prune_directory()" part will do the *exact* pathspec pruning,
while the "read_directory()" will use the pathspec just to do some quick
high-level pruning of the directories it will recurse into.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Junio C Hamano <junkio@cox.net>
2007-03-31 03:39:30 +00:00
|
|
|
{
|
2015-03-08 10:12:26 +00:00
|
|
|
struct untracked_cache_dir *untracked;
|
2006-05-17 02:46:16 +00:00
|
|
|
|
2021-05-12 17:28:14 +00:00
|
|
|
trace2_region_enter("dir", "read_directory", istate->repo);
|
2023-02-27 15:28:10 +00:00
|
|
|
dir->internal.visited_paths = 0;
|
|
|
|
dir->internal.visited_directories = 0;
|
2018-08-18 14:41:22 +00:00
|
|
|
|
|
|
|
if (has_symlink_leading_path(path, len)) {
|
2021-05-12 17:28:14 +00:00
|
|
|
trace2_region_leave("dir", "read_directory", istate->repo);
|
2008-08-04 07:52:37 +00:00
|
|
|
return dir->nr;
|
2018-08-18 14:41:22 +00:00
|
|
|
}
|
2008-08-04 07:52:37 +00:00
|
|
|
|
2022-02-27 21:57:01 +00:00
|
|
|
untracked = validate_untracked_cache(dir, len, pathspec, istate);
|
2015-03-08 10:12:26 +00:00
|
|
|
if (!untracked)
|
|
|
|
/*
|
|
|
|
* make sure untracked cache code path is disabled,
|
|
|
|
* e.g. prep_exclude()
|
|
|
|
*/
|
|
|
|
dir->untracked = NULL;
|
2017-05-05 19:53:33 +00:00
|
|
|
if (!len || treat_leading_path(dir, istate, path, len, pathspec))
|
2017-09-18 17:24:33 +00:00
|
|
|
read_directory_recursive(dir, istate, path, len, untracked, 0, 0, pathspec);
|
2017-05-18 08:21:53 +00:00
|
|
|
QSORT(dir->entries, dir->nr, cmp_dir_entry);
|
|
|
|
QSORT(dir->ignored, dir->ignored_nr, cmp_dir_entry);
|
2017-05-18 08:21:52 +00:00
|
|
|
|
2021-05-12 17:28:14 +00:00
|
|
|
emit_traversal_statistics(dir, istate->repo, path, len);
|
|
|
|
|
|
|
|
trace2_region_leave("dir", "read_directory", istate->repo);
|
2015-03-08 10:12:38 +00:00
|
|
|
if (dir->untracked) {
|
2018-02-28 21:21:09 +00:00
|
|
|
static int force_untracked_cache = -1;
|
|
|
|
|
|
|
|
if (force_untracked_cache < 0)
|
|
|
|
force_untracked_cache =
|
2022-02-17 21:00:29 +00:00
|
|
|
git_env_bool("GIT_FORCE_UNTRACKED_CACHE", -1);
|
|
|
|
if (force_untracked_cache < 0)
|
|
|
|
force_untracked_cache = (istate->repo->settings.core_untracked_cache == UNTRACKED_CACHE_WRITE);
|
2018-02-28 21:21:09 +00:00
|
|
|
if (force_untracked_cache &&
|
2018-02-05 19:56:19 +00:00
|
|
|
dir->untracked == istate->untracked &&
|
2015-03-08 10:12:39 +00:00
|
|
|
(dir->untracked->dir_opened ||
|
|
|
|
dir->untracked->gitignore_invalidated ||
|
|
|
|
dir->untracked->dir_invalidated))
|
2017-05-05 19:53:33 +00:00
|
|
|
istate->cache_changed |= UNTRACKED_CHANGED;
|
|
|
|
if (dir->untracked != istate->untracked) {
|
2017-06-15 23:15:46 +00:00
|
|
|
FREE_AND_NULL(dir->untracked);
|
2015-03-08 10:12:39 +00:00
|
|
|
}
|
2015-03-08 10:12:38 +00:00
|
|
|
}
|
2021-05-12 17:28:14 +00:00
|
|
|
|
2006-05-17 02:02:14 +00:00
|
|
|
return dir->nr;
|
|
|
|
}
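For readers tracing how this entry point is used, a minimal caller looks roughly like the sketch below. It is illustrative only, assumes git's internal headers, and the helper name list_untracked() is hypothetical; the flags, fields and functions it touches (DIR_INIT, DIR_SHOW_OTHER_DIRECTORIES, setup_standard_excludes(), read_directory(), dir_clear()) all appear in this file.

#include "git-compat-util.h"
#include "dir.h"
#include "read-cache-ll.h"

/* Sketch: print every untracked path in the working tree (hypothetical helper). */
static void list_untracked(struct index_state *istate)
{
	struct dir_struct dir = DIR_INIT;
	int i;

	dir.flags |= DIR_SHOW_OTHER_DIRECTORIES;   /* list an untracked directory as a single "dir/" entry */
	setup_standard_excludes(&dir);             /* .gitignore, $GIT_DIR/info/exclude, core.excludesfile */

	/* empty path with len == 0 means "the whole working tree"; NULL pathspec means no limiting */
	read_directory(&dir, istate, "", 0, NULL);

	for (i = 0; i < dir.nr; i++)
		printf("%s\n", dir.entries[i]->name);

	dir_clear(&dir);                           /* frees entries[], ignored[] and the exclude state */
}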
|
2006-09-08 08:05:34 +00:00
|
|
|
|
2007-11-29 09:11:46 +00:00
|
|
|
int file_exists(const char *f)
|
2006-09-08 08:05:34 +00:00
|
|
|
{
|
2007-11-29 09:11:46 +00:00
|
|
|
struct stat sb;
|
2007-11-18 09:58:16 +00:00
|
|
|
return lstat(f, &sb) == 0;
|
2006-09-08 08:05:34 +00:00
|
|
|
}
|
2007-08-01 00:29:17 +00:00
|
|
|
|
2019-04-16 09:33:34 +00:00
|
|
|
int repo_file_exists(struct repository *repo, const char *path)
|
|
|
|
{
|
|
|
|
if (repo != the_repository)
|
|
|
|
BUG("do not know how to check file existence in arbitrary repo");
|
|
|
|
|
|
|
|
return file_exists(path);
|
|
|
|
}
|
|
|
|
|
2015-09-28 16:12:18 +00:00
|
|
|
static int cmp_icase(char a, char b)
|
|
|
|
{
|
|
|
|
if (a == b)
|
|
|
|
return 0;
|
|
|
|
if (ignore_case)
|
|
|
|
return toupper(a) - toupper(b);
|
|
|
|
return a - b;
|
|
|
|
}
|
|
|
|
|
2007-08-01 00:29:17 +00:00
|
|
|
/*
|
2011-03-26 09:04:24 +00:00
|
|
|
* Given two normalized paths (a trailing slash is ok), if subdir is
|
|
|
|
* outside dir, return -1. Otherwise return the offset in subdir that
|
|
|
|
* can be used as relative path to dir.
|
2007-08-01 00:29:17 +00:00
|
|
|
*/
|
2011-03-26 09:04:24 +00:00
|
|
|
int dir_inside_of(const char *subdir, const char *dir)
|
2007-08-01 00:29:17 +00:00
|
|
|
{
|
2011-03-26 09:04:24 +00:00
|
|
|
int offset = 0;
|
2007-08-01 00:29:17 +00:00
|
|
|
|
2011-03-26 09:04:24 +00:00
|
|
|
assert(dir && subdir && *dir && *subdir);
|
2007-08-01 00:29:17 +00:00
|
|
|
|
2015-09-28 16:12:18 +00:00
|
|
|
while (*dir && *subdir && !cmp_icase(*dir, *subdir)) {
|
2007-08-01 00:29:17 +00:00
|
|
|
dir++;
|
2011-03-26 09:04:24 +00:00
|
|
|
subdir++;
|
|
|
|
offset++;
|
2010-05-22 11:13:05 +00:00
|
|
|
}
|
2011-03-26 09:04:24 +00:00
|
|
|
|
|
|
|
/* hel[p]/me vs hel[l]/yeah */
|
|
|
|
if (*dir && *subdir)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (!*subdir)
|
|
|
|
return !*dir ? offset : -1; /* same dir */
|
|
|
|
|
|
|
|
/* foo/[b]ar vs foo/[] */
|
|
|
|
if (is_dir_sep(dir[-1]))
|
|
|
|
return is_dir_sep(subdir[-1]) ? offset : -1;
|
|
|
|
|
|
|
|
/* foo[/]bar vs foo[] */
|
|
|
|
return is_dir_sep(*subdir) ? offset + 1 : -1;
|
2007-08-01 00:29:17 +00:00
|
|
|
}
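A few concrete inputs make the return contract easier to see; the values below were worked out by hand from the comparison loop above and are illustrative only.

/*
 * Worked examples for dir_inside_of() (offsets point into subdir):
 *
 *   dir_inside_of("a/b/c", "a/b")          ==  4   subdir + 4 is the relative path "c"
 *   dir_inside_of("a/b", "a/b")            ==  3   same directory (empty relative path)
 *   dir_inside_of("help/me", "hell/yeah")  == -1   mismatch inside a component
 *   dir_inside_of("a", "a/b")              == -1   subdir lies outside (above) dir
 */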
|
|
|
|
|
|
|
|
int is_inside_dir(const char *dir)
|
|
|
|
{
|
2014-07-28 18:30:39 +00:00
|
|
|
char *cwd;
|
|
|
|
int rc;
|
|
|
|
|
2011-03-26 09:04:25 +00:00
|
|
|
if (!dir)
|
|
|
|
return 0;
|
2014-07-28 18:30:39 +00:00
|
|
|
|
|
|
|
cwd = xgetcwd();
|
|
|
|
rc = (dir_inside_of(cwd, dir) >= 0);
|
|
|
|
free(cwd);
|
|
|
|
return rc;
|
2007-08-01 00:29:17 +00:00
|
|
|
}
|
2007-09-28 15:28:54 +00:00
|
|
|
|
2009-01-11 12:19:12 +00:00
|
|
|
int is_empty_dir(const char *path)
|
|
|
|
{
|
|
|
|
DIR *dir = opendir(path);
|
|
|
|
struct dirent *e;
|
|
|
|
int ret = 1;
|
|
|
|
|
|
|
|
if (!dir)
|
|
|
|
return 0;
|
|
|
|
|
2021-05-12 17:28:22 +00:00
|
|
|
e = readdir_skip_dot_and_dotdot(dir);
|
|
|
|
if (e)
|
|
|
|
ret = 0;
|
2009-01-11 12:19:12 +00:00
|
|
|
|
|
|
|
closedir(dir);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-08-10 11:46:36 +00:00
|
|
|
char *git_url_basename(const char *repo, int is_bundle, int is_bare)
|
|
|
|
{
|
|
|
|
const char *end = repo + strlen(repo), *start, *ptr;
|
|
|
|
size_t len;
|
|
|
|
char *dir;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Skip scheme.
|
|
|
|
*/
|
|
|
|
start = strstr(repo, "://");
|
2022-05-02 16:50:37 +00:00
|
|
|
if (!start)
|
2021-08-10 11:46:36 +00:00
|
|
|
start = repo;
|
|
|
|
else
|
|
|
|
start += 3;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Skip authentication data. The stripping does happen
|
|
|
|
* greedily, such that we strip up to the last '@' inside
|
|
|
|
* the host part.
|
|
|
|
*/
|
|
|
|
for (ptr = start; ptr < end && !is_dir_sep(*ptr); ptr++) {
|
|
|
|
if (*ptr == '@')
|
|
|
|
start = ptr + 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Strip trailing spaces, slashes and /.git
|
|
|
|
*/
|
|
|
|
while (start < end && (is_dir_sep(end[-1]) || isspace(end[-1])))
|
|
|
|
end--;
|
|
|
|
if (end - start > 5 && is_dir_sep(end[-5]) &&
|
|
|
|
!strncmp(end - 4, ".git", 4)) {
|
|
|
|
end -= 5;
|
|
|
|
while (start < end && is_dir_sep(end[-1]))
|
|
|
|
end--;
|
|
|
|
}
|
|
|
|
|
2022-05-24 00:23:06 +00:00
|
|
|
/*
|
|
|
|
* It should not be possible to overflow `ptrdiff_t` by passing in an
|
|
|
|
* insanely long URL, but GCC does not know that and will complain
|
|
|
|
* without this check.
|
|
|
|
*/
|
|
|
|
if (end - start < 0)
|
|
|
|
die(_("No directory name could be guessed.\n"
|
|
|
|
"Please specify a directory on the command line"));
|
|
|
|
|
2021-08-10 11:46:36 +00:00
|
|
|
/*
|
|
|
|
* Strip trailing port number if we've got only a
|
|
|
|
* hostname (that is, there is no dir separator but a
|
|
|
|
* colon). This check is required such that we do not
|
|
|
|
* strip URIs like '/foo/bar:2222.git', which should
|
|
|
|
* result in a dir '2222' being guessed due to backwards
|
|
|
|
* compatibility.
|
|
|
|
*/
|
|
|
|
if (memchr(start, '/', end - start) == NULL
|
|
|
|
&& memchr(start, ':', end - start) != NULL) {
|
|
|
|
ptr = end;
|
|
|
|
while (start < ptr && isdigit(ptr[-1]) && ptr[-1] != ':')
|
|
|
|
ptr--;
|
|
|
|
if (start < ptr && ptr[-1] == ':')
|
|
|
|
end = ptr - 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find last component. To remain backwards compatible we
|
|
|
|
* also regard colons as path separators, such that
|
|
|
|
* cloning a repository 'foo:bar.git' would result in a
|
|
|
|
* directory 'bar' being guessed.
|
|
|
|
*/
|
|
|
|
ptr = end;
|
|
|
|
while (start < ptr && !is_dir_sep(ptr[-1]) && ptr[-1] != ':')
|
|
|
|
ptr--;
|
|
|
|
start = ptr;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Strip .{bundle,git}.
|
|
|
|
*/
|
|
|
|
len = end - start;
|
|
|
|
strip_suffix_mem(start, &len, is_bundle ? ".bundle" : ".git");
|
|
|
|
|
|
|
|
if (!len || (len == 1 && *start == '/'))
|
|
|
|
die(_("No directory name could be guessed.\n"
|
|
|
|
"Please specify a directory on the command line"));
|
|
|
|
|
|
|
|
if (is_bare)
|
|
|
|
dir = xstrfmt("%.*s.git", (int)len, start);
|
|
|
|
else
|
|
|
|
dir = xstrndup(start, len);
|
|
|
|
/*
|
|
|
|
* Replace sequences of 'control' characters and whitespace
|
|
|
|
* with one ascii space, remove leading and trailing spaces.
|
|
|
|
*/
|
|
|
|
if (*dir) {
|
|
|
|
char *out = dir;
|
|
|
|
int prev_space = 1 /* strip leading whitespace */;
|
|
|
|
for (end = dir; *end; ++end) {
|
|
|
|
char ch = *end;
|
|
|
|
if ((unsigned char)ch < '\x20')
|
|
|
|
ch = '\x20';
|
|
|
|
if (isspace(ch)) {
|
|
|
|
if (prev_space)
|
|
|
|
continue;
|
|
|
|
prev_space = 1;
|
|
|
|
} else
|
|
|
|
prev_space = 0;
|
|
|
|
*out++ = ch;
|
|
|
|
}
|
|
|
|
*out = '\0';
|
|
|
|
if (out > dir && prev_space)
|
|
|
|
out[-1] = '\0';
|
|
|
|
}
|
|
|
|
return dir;
|
|
|
|
}
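The stripping rules above are easiest to grasp from examples; the mappings below were traced by hand from this function and are illustrative, not exhaustive.

/*
 * git_url_basename() examples (is_bundle = 0 and is_bare = 0 unless noted):
 *
 *   "https://user@example.com/path/repo.git/"  -> "repo"       ("repo.git" when is_bare)
 *   "host.example.com:2222"                    -> "host.example.com"   (trailing port dropped)
 *   "/srv/foo/bar:2222.git"                    -> "2222"        (colon kept as a separator for
 *                                                                backwards compatibility)
 *   "foo:bar.git"                              -> "bar"
 *   "repo.bundle" with is_bundle = 1           -> "repo"
 */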
|
|
|
|
|
|
|
|
void strip_dir_trailing_slashes(char *dir)
|
|
|
|
{
|
|
|
|
char *end = dir + strlen(dir);
|
|
|
|
|
|
|
|
while (dir < end - 1 && is_dir_sep(end[-1]))
|
|
|
|
end--;
|
|
|
|
*end = '\0';
|
|
|
|
}
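Two short traces, derived from the loop above, show why the dir < end - 1 guard matters: a bare root path keeps its single separator.

/*
 * strip_dir_trailing_slashes() examples:
 *
 *   "a/b///"  ->  "a/b"    trailing separators removed
 *   "///"     ->  "/"      the guard `dir < end - 1` keeps one leading slash
 *   "a"       ->  "a"      nothing to strip
 */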
|
|
|
|
|
2012-03-15 08:04:12 +00:00
|
|
|
static int remove_dir_recurse(struct strbuf *path, int flag, int *kept_up)
|
2007-09-28 15:28:54 +00:00
|
|
|
{
|
2009-06-30 22:33:45 +00:00
|
|
|
DIR *dir;
|
2007-09-28 15:28:54 +00:00
|
|
|
struct dirent *e;
|
2012-03-15 08:04:12 +00:00
|
|
|
int ret = 0, original_len = path->len, len, kept_down = 0;
|
2009-06-30 22:33:45 +00:00
|
|
|
int only_empty = (flag & REMOVE_DIR_EMPTY_ONLY);
|
2012-03-15 14:58:54 +00:00
|
|
|
int keep_toplevel = (flag & REMOVE_DIR_KEEP_TOPLEVEL);
|
2021-12-09 05:08:34 +00:00
|
|
|
int purge_original_cwd = (flag & REMOVE_DIR_PURGE_ORIGINAL_CWD);
|
2017-10-15 22:07:06 +00:00
|
|
|
struct object_id submodule_head;
|
2007-09-28 15:28:54 +00:00
|
|
|
|
2009-06-30 22:33:45 +00:00
|
|
|
if ((flag & REMOVE_DIR_KEEP_NESTED_GIT) &&
|
refs: convert resolve_gitlink_ref to struct object_id
Convert the declaration and definition of resolve_gitlink_ref to use
struct object_id and apply the following semantic patch:
@@
expression E1, E2, E3;
@@
- resolve_gitlink_ref(E1, E2, E3.hash)
+ resolve_gitlink_ref(E1, E2, &E3)
@@
expression E1, E2, E3;
@@
- resolve_gitlink_ref(E1, E2, E3->hash)
+ resolve_gitlink_ref(E1, E2, E3)
Signed-off-by: brian m. carlson <sandals@crustytoothpaste.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2017-10-15 22:07:07 +00:00
|
|
|
!resolve_gitlink_ref(path->buf, "HEAD", &submodule_head)) {
|
2009-06-30 22:33:45 +00:00
|
|
|
/* Do not descend and nuke a nested git work tree. */
|
2012-03-15 08:04:12 +00:00
|
|
|
if (kept_up)
|
|
|
|
*kept_up = 1;
|
2009-06-30 22:33:45 +00:00
|
|
|
return 0;
|
2012-03-15 08:04:12 +00:00
|
|
|
}
|
2009-06-30 22:33:45 +00:00
|
|
|
|
2012-03-15 08:04:12 +00:00
|
|
|
flag &= ~REMOVE_DIR_KEEP_TOPLEVEL;
|
2009-06-30 22:33:45 +00:00
|
|
|
dir = opendir(path->buf);
|
2012-03-15 14:58:54 +00:00
|
|
|
if (!dir) {
|
2014-01-18 22:48:57 +00:00
|
|
|
if (errno == ENOENT)
|
|
|
|
return keep_toplevel ? -1 : 0;
|
|
|
|
else if (errno == EACCES && !keep_toplevel)
|
2014-01-18 22:48:56 +00:00
|
|
|
/*
|
|
|
|
* An empty dir could be removable even if it
|
|
|
|
* is unreadable:
|
|
|
|
*/
|
2012-03-15 14:58:54 +00:00
|
|
|
return rmdir(path->buf);
|
|
|
|
else
|
|
|
|
return -1;
|
|
|
|
}
|
use strbuf_complete to conditionally append slash
When working with paths in strbufs, we frequently want to
ensure that a directory contains a trailing slash before
appending to it. We can shorten this code (and make the
intent more obvious) by calling strbuf_complete.
Most of these cases are trivially identical conversions, but
there are two things to note:
- in a few cases we did not check that the strbuf is
non-empty (which would lead to an out-of-bounds memory
access). These were generally not triggerable in
practice, either from earlier assertions, or typically
because we would have just fed the strbuf to opendir(),
which would choke on an empty path.
- in a few cases we indexed the buffer with "original_len"
or similar, rather than the current sb->len, and it is
not immediately obvious from the diff that they are the
same. In all of these cases, I manually verified that
the strbuf does not change between the assignment and
the strbuf_complete call.
This does not convert cases which look like:
if (sb->len && !is_dir_sep(sb->buf[sb->len - 1]))
strbuf_addch(sb, '/');
as those are obviously semantically different. Some of these
cases arguably should be doing that, but that is out of
scope for this change, which aims purely for cleanup with no
behavior change (and at least it will make such sites easier
to find and examine in the future, as we can grep for
strbuf_complete).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-09-24 21:08:35 +00:00
|
|
|
strbuf_complete(path, '/');
|
2007-09-28 15:28:54 +00:00
|
|
|
|
|
|
|
len = path->len;
|
2021-05-12 17:28:22 +00:00
|
|
|
while ((e = readdir_skip_dot_and_dotdot(dir)) != NULL) {
|
2007-09-28 15:28:54 +00:00
|
|
|
struct stat st;
|
|
|
|
|
|
|
|
strbuf_setlen(path, len);
|
|
|
|
strbuf_addstr(path, e->d_name);
|
2014-01-18 22:48:57 +00:00
|
|
|
if (lstat(path->buf, &st)) {
|
|
|
|
if (errno == ENOENT)
|
|
|
|
/*
|
|
|
|
* file disappeared, which is what we
|
|
|
|
* wanted anyway
|
|
|
|
*/
|
|
|
|
continue;
|
2019-11-05 17:07:23 +00:00
|
|
|
/* fall through */
|
2014-01-18 22:48:57 +00:00
|
|
|
} else if (S_ISDIR(st.st_mode)) {
|
2012-03-15 08:04:12 +00:00
|
|
|
if (!remove_dir_recurse(path, flag, &kept_down))
|
2007-09-28 15:28:54 +00:00
|
|
|
continue; /* happy */
|
2014-01-18 22:48:57 +00:00
|
|
|
} else if (!only_empty &&
|
|
|
|
(!unlink(path->buf) || errno == ENOENT)) {
|
2007-09-28 15:28:54 +00:00
|
|
|
continue; /* happy, too */
|
2014-01-18 22:48:57 +00:00
|
|
|
}
|
2007-09-28 15:28:54 +00:00
|
|
|
|
|
|
|
/* path too long, stat fails, or non-directory still exists */
|
|
|
|
ret = -1;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
closedir(dir);
|
|
|
|
|
|
|
|
strbuf_setlen(path, original_len);
|
2021-12-09 05:08:34 +00:00
|
|
|
if (!ret && !keep_toplevel && !kept_down) {
|
|
|
|
if (!purge_original_cwd &&
|
|
|
|
startup_info->original_cwd &&
|
|
|
|
!strcmp(startup_info->original_cwd, path->buf))
|
|
|
|
ret = -1; /* Do not remove current working directory */
|
|
|
|
else
|
|
|
|
ret = (!rmdir(path->buf) || errno == ENOENT) ? 0 : -1;
|
|
|
|
} else if (kept_up)
|
2012-03-15 08:04:12 +00:00
|
|
|
/*
|
|
|
|
* report to the level above that it is not an error that we
|
|
|
|
* did not rmdir() our directory.
|
|
|
|
*/
|
|
|
|
*kept_up = !ret;
|
2007-09-28 15:28:54 +00:00
|
|
|
return ret;
|
|
|
|
}
|
core.excludesfile clean-up
There are inconsistencies in the way commands currently handle
the core.excludesfile configuration variable. The problem is
the variable is too new to be noticed by anything other than
git-add and git-status.
* git-ls-files does not notice any of the "ignore" files by
default, as it predates the standardized set of ignore files.
The calling scripts established the convention to use
.git/info/exclude, .gitignore, and later core.excludesfile.
* git-add and git-status know about it because they call
add_excludes_from_file() directly with their own notion of
which standard set of ignore files to use. This is just a
stupid duplication of code that needs to be updated every time
the definition of the standard set of ignore files is
changed.
* git-read-tree takes --exclude-per-directory=<gitignore>,
not because the flexibility was needed. Again, this was
because the option predates the standardization of the ignore
files.
* git-merge-recursive uses hardcoded per-directory .gitignore
and nothing else. git-clean (scripted version) does not
honor core.* because its call to underlying ls-files does not
know about it. git-clean in C (parked in 'pu') doesn't either.
We probably could change git-ls-files to use the standard set
when no excludes are specified on the command line and ignore
processing was asked, or something like that, but that will be a
change in semantics and might break people's scripts in a subtle
way. I am somewhat reluctant to make such a change.
On the other hand, I think it makes perfect sense to fix
git-read-tree, git-merge-recursive and git-clean to follow the
same rule as other commands. I cannot think of a valid use case
to give an exclude-per-directory that is nonstandard to
read-tree command, outside a "negative" test in the t1004 test
script.
This patch is the first step to untangle this mess.
The next step would be to teach read-tree, merge-recursive and
clean (in C) to use setup_standard_excludes().
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2007-11-14 08:05:00 +00:00
|
|
|
|
2012-03-15 08:04:12 +00:00
|
|
|
int remove_dir_recursively(struct strbuf *path, int flag)
|
|
|
|
{
|
|
|
|
return remove_dir_recurse(path, flag, NULL);
|
|
|
|
}
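A typical call site passes a strbuf holding the directory plus a combination of the REMOVE_DIR_* flags handled in remove_dir_recurse() above. The snippet below is a sketch under that assumption; clear_scratch_dir() and the path it removes are hypothetical.

#include "git-compat-util.h"
#include "dir.h"
#include "strbuf.h"

/* Sketch: empty "tmp/scratch" but keep the top-level directory itself. */
static int clear_scratch_dir(void)
{
	struct strbuf path = STRBUF_INIT;
	int ret;

	strbuf_addstr(&path, "tmp/scratch");
	/*
	 * REMOVE_DIR_KEEP_TOPLEVEL: remove the contents but do not rmdir() the
	 * directory itself; REMOVE_DIR_KEEP_NESTED_GIT would additionally skip
	 * embedded git work trees.
	 */
	ret = remove_dir_recursively(&path, REMOVE_DIR_KEEP_TOPLEVEL);

	strbuf_release(&path);
	return ret;	/* 0 on success, -1 if something could not be removed */
}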
|
|
|
|
|
memoize common git-path "constant" files
One of the most common uses of git_path() is to pass a
constant, like git_path("MERGE_MSG"). This has two
drawbacks:
1. The return value is a static buffer, and the lifetime
is dependent on other calls to git_path, etc.
2. There's no compile-time checking of the pathname. This
is OK for a one-off (after all, we have to spell it
correctly at least once), but many of these constant
strings appear throughout the code.
This patch introduces a series of functions to "memoize"
these strings, which are essentially globals for the
lifetime of the program. We compute the value once, take
ownership of the buffer, and return the cached value for
subsequent calls. cache.h provides a helper macro for
defining these functions as one-liners, and defines a few
common ones for global use.
Using a macro is a little bit gross, but it does nicely
document the purpose of the functions. If we need to touch
them all later (e.g., because we learned how to change the
git_dir variable at runtime, and need to invalidate all of
the stored values), it will be much easier to have the
complete list.
Note that the shared-global functions have separate, manual
declarations. We could do something clever with the macros
(e.g., expand it to a declaration in some places, and a
declaration _and_ a definition in path.c). But there aren't
that many, and it's probably better to stay away from
too-magical macros.
Likewise, if we abandon the C preprocessor in favor of
generating these with a script, we could get much fancier.
E.g., normalizing "FOO/BAR-BAZ" into "git_path_foo_bar_baz".
But the small amount of saved typing is probably not worth
the resulting confusion to readers who want to grep for the
function's definition.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-08-10 09:38:57 +00:00
|
|
|
static GIT_PATH_FUNC(git_path_info_exclude, "info/exclude")
|
|
|
|
|
2007-11-14 08:05:00 +00:00
|
|
|
void setup_standard_excludes(struct dir_struct *dir)
|
|
|
|
{
|
|
|
|
dir->exclude_per_dir = ".gitignore";
|
2015-04-22 21:31:49 +00:00
|
|
|
|
2018-06-27 04:46:52 +00:00
|
|
|
/* core.excludesfile defaulting to $XDG_CONFIG_HOME/git/ignore */
|
2015-05-06 08:01:00 +00:00
|
|
|
if (!excludes_file)
|
|
|
|
excludes_file = xdg_config_home("ignore");
|
config: allow inaccessible configuration under $HOME
The changes v1.7.12.1~2^2~4 (config: warn on inaccessible files,
2012-08-21) and v1.8.1.1~22^2~2 (config: treat user and xdg config
permission problems as errors, 2012-10-13) were intended to prevent
important configuration (think "[transfer] fsckobjects") from being
ignored when the configuration is unintentionally unreadable (for
example with EIO on a flaky filesystem, or with ENOMEM due to a DoS
attack). Usually ~/.gitconfig and ~/.config/git are readable by the
current user, and if they aren't then it would be easy to fix those
permissions, so the damage from adding this check should have been
minimal.
Unfortunately the access() check often trips when git is being run as
a server. A daemon (such as inetd or git-daemon) starts as "root",
creates a listening socket, and then drops privileges, meaning that
when git commands are invoked they cannot access $HOME and die with
fatal: unable to access '/root/.config/git/config': Permission denied
Any patch to fix this would have one of three problems:
1. We annoy sysadmins who need to take an extra step to handle HOME
when dropping privileges (the current behavior, or any other
proposal that they have to opt into).
2. We annoy sysadmins who want to set HOME when dropping privileges,
either by making what they want to do impossible, or making them
set an extra variable or option to accomplish what used to work
(e.g., a patch to git-daemon to set HOME when --user is passed).
3. We loosen the check, so some cases which might be noteworthy are
not caught.
This patch is of type (3).
Treat user and xdg configuration that are inaccessible due to
permissions (EACCES) as though no user configuration was provided at
all.
An alternative method would be to check if $HOME is readable, but that
would not help in cases where the user who dropped privileges had a
globally readable HOME with only .config or .gitconfig being private.
This does not change the behavior when /etc/gitconfig or .git/config
is unreadable (since those are more serious configuration errors),
nor when ~/.gitconfig or ~/.config/git is unreadable due to problems
other than permissions.
Signed-off-by: Jonathan Nieder <jrnieder@gmail.com>
Improved-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-04-12 21:03:18 +00:00
|
|
|
if (excludes_file && !access_or_warn(excludes_file, R_OK, 0))
|
2019-09-03 18:04:57 +00:00
|
|
|
add_patterns_from_file_1(dir, excludes_file,
|
2023-02-27 15:28:10 +00:00
|
|
|
dir->untracked ? &dir->internal.ss_excludes_file : NULL);
|
2015-04-22 21:31:49 +00:00
|
|
|
|
|
|
|
/* per repository user preference */
|
2016-10-20 06:16:41 +00:00
|
|
|
if (startup_info->have_repository) {
|
|
|
|
const char *path = git_path_info_exclude();
|
|
|
|
if (!access_or_warn(path, R_OK, 0))
|
2019-09-03 18:04:57 +00:00
|
|
|
add_patterns_from_file_1(dir, path,
|
2023-02-27 15:28:10 +00:00
|
|
|
dir->untracked ? &dir->internal.ss_info_exclude : NULL);
|
2016-10-20 06:16:41 +00:00
|
|
|
}
|
2007-11-14 08:05:00 +00:00
|
|
|
}
|
2008-09-26 22:56:46 +00:00
|
|
|
|
2021-01-23 19:58:17 +00:00
|
|
|
char *get_sparse_checkout_filename(void)
|
|
|
|
{
|
|
|
|
return git_pathdup("info/sparse-checkout");
|
|
|
|
}
|
|
|
|
|
|
|
|
int get_sparse_checkout_patterns(struct pattern_list *pl)
|
|
|
|
{
|
|
|
|
int res;
|
|
|
|
char *sparse_filename = get_sparse_checkout_filename();
|
|
|
|
|
|
|
|
pl->use_cone_patterns = core_sparse_checkout_cone;
|
2021-02-16 14:44:28 +00:00
|
|
|
res = add_patterns_from_file_to_list(sparse_filename, "", 0, pl, NULL, 0);
|
2021-01-23 19:58:17 +00:00
|
|
|
|
|
|
|
free(sparse_filename);
|
|
|
|
return res;
|
|
|
|
}
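Callers usually load the patterns into a pattern_list on the stack and release it with clear_pattern_list(), the same helper dir_clear() uses further down. A sketch, assuming the usual 0-on-success convention of add_patterns_from_file_to_list(); have_sparse_patterns() is a hypothetical helper.

#include "git-compat-util.h"
#include "dir.h"

/* Sketch: load $GIT_DIR/info/sparse-checkout and report whether it parsed. */
static int have_sparse_patterns(void)
{
	struct pattern_list pl = { 0 };
	int ok;

	ok = !get_sparse_checkout_patterns(&pl);	/* assumes 0 means the file was read */

	clear_pattern_list(&pl);
	return ok;
}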
|
|
|
|
|
2008-09-26 22:56:46 +00:00
|
|
|
int remove_path(const char *name)
|
|
|
|
{
|
|
|
|
char *slash;
|
|
|
|
|
2017-05-30 00:23:33 +00:00
|
|
|
if (unlink(name) && !is_missing_file_error(errno))
|
2008-09-26 22:56:46 +00:00
|
|
|
return -1;
|
|
|
|
|
|
|
|
slash = strrchr(name, '/');
|
|
|
|
if (slash) {
|
|
|
|
char *dirs = xstrdup(name);
|
|
|
|
slash = dirs + (slash - name);
|
|
|
|
do {
|
|
|
|
*slash = '\0';
|
dir: avoid incidentally removing the original_cwd in remove_path()
Modern git often tries to avoid leaving empty directories around when
removing files. Originally, it did not bother. This behavior started
with commit 80e21a9ed809 (merge-recursive::removeFile: remove empty
directories, 2005-11-19), stating the reason simply as:
When the last file in a directory is removed as the result of a
merge, try to rmdir the now-empty directory.
This was reimplemented in C and renamed to remove_path() in commit
e1b3a2cad7 ("Build-in merge-recursive", 2008-02-07), but was still
internal to merge-recursive.
This trend towards removing leading empty directories continued with
commit d9b814cc97f1 (Add builtin "git rm" command, 2006-05-19), which
stated the reasoning as:
The other question is what to do with leading directories. The old
"git rm" script didn't do anything, which is somewhat inconsistent.
This one will actually clean up directories that have become empty
as a result of removing the last file, but maybe we want to have a
flag to decide the behaviour?
remove_path() in dir.c was added in 4a92d1bfb784 (Add remove_path: a
function to remove as much as possible of a path, 2008-09-27), because
it was noted that we had two separate implementations of the same idea
AND both were buggy. It described the purpose of the function as
a function to remove as much as possible of a path
Why remove as much as possible? Well, at the time we probably would
have said something like:
* removing leading directories makes things feel tidy
* removing leading directories doesn't hurt anything so long as they
had no files in them.
But I don't believe those reasons hold when the empty directory happens
to be the current working directory we inherited from our parent
process. Leaving the parent process in a deleted directory can cause
user confusion when subsequent processes fail: any git command, for
example, will immediately fail with
fatal: Unable to read current working directory: No such file or directory
Other commands may similarly get confused. Modify remove_path() so that
the empty leading directories it also deletes do not include the
current working directory we inherited from our parent process. I have
looked through every caller of remove_path() in the current codebase to
make sure that all should take this change.
Acked-by: Derrick Stolee <stolee@gmail.com>
Acked-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-12-09 05:08:33 +00:00
|
|
|
if (startup_info->original_cwd &&
|
|
|
|
!strcmp(startup_info->original_cwd, dirs))
|
|
|
|
break;
|
2010-02-19 05:57:21 +00:00
|
|
|
} while (rmdir(dirs) == 0 && (slash = strrchr(dirs, '/')));
|
2008-09-26 22:56:46 +00:00
|
|
|
free(dirs);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
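The effect of the unlink()-then-rmdir() loop above is easiest to see with a worked example; the path is illustrative.

/*
 * remove_path("a/b/c/file") does, in order:
 *
 *   unlink("a/b/c/file")   a missing file (ENOENT) is not an error
 *   rmdir("a/b/c")         only succeeds if the directory is now empty
 *   rmdir("a/b")           keeps walking up while rmdir() succeeds ...
 *   rmdir("a")             ... but never removes startup_info->original_cwd
 *
 * and returns 0; only a failed unlink() of an existing file yields -1.
 */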
|
|
|
|
|
2013-01-06 16:58:05 +00:00
|
|
|
/*
|
2020-08-18 22:58:25 +00:00
|
|
|
* Frees memory within dir which was allocated, and resets fields for further
|
|
|
|
* use. Does not free dir itself.
|
2013-01-06 16:58:05 +00:00
|
|
|
*/
|
dir: fix problematic API to avoid memory leaks
The dir structure seemed to have a number of leaks and problems around
it. First I noticed that parent_hashmap and recursive_hashmap were
being leaked (though Peff noticed and submitted fixes before me). Then
I noticed in the previous commit that clear_directory() was only taking
responsibility for a subset of fields within dir_struct, despite the
fact that entries[] and ignored[] we allocated internally to dir.c.
That, of course, resulted in many callers either leaking or haphazardly
trying to free these arrays and their contents.
Digging further, I found that despite the pretty clear documentation
near the top of dir.h that folks were supposed to call clear_directory()
when the user no longer needed the dir_struct, there were four callers
that didn't bother doing that at all. However, two of them clearly
thought about leaks since they had an UNLEAK(dir) directive, which to me
suggests that the method to free the data was too unclear. I suspect
the non-obviousness of the API and its holes led folks to avoid it,
which then snowballed into further problems with the entries[],
ignored[], parent_hashmap, and recursive_hashmap problems.
Rename clear_directory() to dir_clear() to be more in line with other
data structures in git, and introduce a dir_init() to handle the
suggested memsetting of dir_struct to all zeroes. I hope that a name
like "dir_clear()" is more clear, and that the presence of dir_init()
will provide a hint to those looking at the code that they need to look
for either a dir_clear() or a dir_free() and lead them to find
dir_clear().
Signed-off-by: Elijah Newren <newren@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-08-18 22:58:26 +00:00
|
|
|
void dir_clear(struct dir_struct *dir)
|
2013-01-06 16:58:05 +00:00
|
|
|
{
|
|
|
|
int i, j;
|
|
|
|
struct exclude_list_group *group;
|
2019-09-03 18:04:56 +00:00
|
|
|
struct pattern_list *pl;
|
2013-01-06 16:58:05 +00:00
|
|
|
struct exclude_stack *stk;
|
2021-07-01 10:51:27 +00:00
|
|
|
struct dir_struct new = DIR_INIT;
|
2013-01-06 16:58:05 +00:00
|
|
|
|
|
|
|
for (i = EXC_CMDL; i <= EXC_FILE; i++) {
|
2023-02-27 15:28:10 +00:00
|
|
|
group = &dir->internal.exclude_list_group[i];
|
2013-01-06 16:58:05 +00:00
|
|
|
for (j = 0; j < group->nr; j++) {
|
2019-09-03 18:04:56 +00:00
|
|
|
pl = &group->pl[j];
|
2013-01-06 16:58:05 +00:00
|
|
|
if (i == EXC_DIRS)
|
2019-09-03 18:04:56 +00:00
|
|
|
free((char *)pl->src);
|
2019-09-03 18:04:57 +00:00
|
|
|
clear_pattern_list(pl);
|
2013-01-06 16:58:05 +00:00
|
|
|
}
|
2019-09-03 18:04:56 +00:00
|
|
|
free(group->pl);
|
2013-01-06 16:58:05 +00:00
|
|
|
}
|
|
|
|
|
2020-08-18 22:58:25 +00:00
|
|
|
for (i = 0; i < dir->ignored_nr; i++)
|
|
|
|
free(dir->ignored[i]);
|
|
|
|
for (i = 0; i < dir->nr; i++)
|
|
|
|
free(dir->entries[i]);
|
|
|
|
free(dir->ignored);
|
|
|
|
free(dir->entries);
|
|
|
|
|
2023-02-27 15:28:10 +00:00
|
|
|
stk = dir->internal.exclude_stack;
|
2013-01-06 16:58:05 +00:00
|
|
|
while (stk) {
|
|
|
|
struct exclude_stack *prev = stk->prev;
|
|
|
|
free(stk);
|
|
|
|
stk = prev;
|
|
|
|
}
|
2023-02-27 15:28:10 +00:00
|
|
|
strbuf_release(&dir->internal.basebuf);
|
2020-08-18 22:58:25 +00:00
|
|
|
|
2021-07-01 10:51:27 +00:00
|
|
|
memcpy(dir, &new, sizeof(*dir));
|
2013-01-06 16:58:05 +00:00
|
|
|
}
|
2015-03-08 10:12:33 +00:00
|
|
|
|
|
|
|
struct ondisk_untracked_cache {
|
|
|
|
struct stat_data info_exclude_stat;
|
|
|
|
struct stat_data excludes_file_stat;
|
|
|
|
uint32_t dir_flags;
|
|
|
|
};
|
|
|
|
|
2017-07-16 12:17:37 +00:00
|
|
|
#define ouc_offset(x) offsetof(struct ondisk_untracked_cache, x)
|
2015-03-08 10:12:33 +00:00
|
|
|
|
|
|
|
struct write_data {
|
|
|
|
int index; /* number of written untracked_cache_dir */
|
|
|
|
struct ewah_bitmap *check_only; /* from untracked_cache_dir */
|
|
|
|
struct ewah_bitmap *valid; /* from untracked_cache_dir */
|
|
|
|
struct ewah_bitmap *sha1_valid; /* set if exclude_sha1 is not null */
|
|
|
|
struct strbuf out;
|
|
|
|
struct strbuf sb_stat;
|
|
|
|
struct strbuf sb_sha1;
|
|
|
|
};
|
|
|
|
|
|
|
|
static void stat_data_to_disk(struct stat_data *to, const struct stat_data *from)
|
|
|
|
{
|
|
|
|
to->sd_ctime.sec = htonl(from->sd_ctime.sec);
|
|
|
|
to->sd_ctime.nsec = htonl(from->sd_ctime.nsec);
|
|
|
|
to->sd_mtime.sec = htonl(from->sd_mtime.sec);
|
|
|
|
to->sd_mtime.nsec = htonl(from->sd_mtime.nsec);
|
|
|
|
to->sd_dev = htonl(from->sd_dev);
|
|
|
|
to->sd_ino = htonl(from->sd_ino);
|
|
|
|
to->sd_uid = htonl(from->sd_uid);
|
|
|
|
to->sd_gid = htonl(from->sd_gid);
|
|
|
|
to->sd_size = htonl(from->sd_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void write_one_dir(struct untracked_cache_dir *untracked,
|
|
|
|
struct write_data *wd)
|
|
|
|
{
|
|
|
|
struct stat_data stat_data;
|
|
|
|
struct strbuf *out = &wd->out;
|
|
|
|
unsigned char intbuf[16];
|
|
|
|
unsigned int intlen, value;
|
|
|
|
int i = wd->index++;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* untracked_nr should be reset whenever valid is clear, but
|
|
|
|
* for safety, reset it here as well.
|
|
|
|
*/
|
|
|
|
if (!untracked->valid) {
|
|
|
|
untracked->untracked_nr = 0;
|
|
|
|
untracked->check_only = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (untracked->check_only)
|
|
|
|
ewah_set(wd->check_only, i);
|
|
|
|
if (untracked->valid) {
|
|
|
|
ewah_set(wd->valid, i);
|
|
|
|
stat_data_to_disk(&stat_data, &untracked->stat_data);
|
|
|
|
strbuf_add(&wd->sb_stat, &stat_data, sizeof(stat_data));
|
|
|
|
}
|
2018-05-02 00:25:48 +00:00
|
|
|
if (!is_null_oid(&untracked->exclude_oid)) {
|
2015-03-08 10:12:33 +00:00
|
|
|
ewah_set(wd->sha1_valid, i);
|
2018-05-02 00:25:48 +00:00
|
|
|
strbuf_add(&wd->sb_sha1, untracked->exclude_oid.hash,
|
|
|
|
the_hash_algo->rawsz);
|
2015-03-08 10:12:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
intlen = encode_varint(untracked->untracked_nr, intbuf);
|
|
|
|
strbuf_add(out, intbuf, intlen);
|
|
|
|
|
|
|
|
/* skip non-recurse directories */
|
|
|
|
for (i = 0, value = 0; i < untracked->dirs_nr; i++)
|
|
|
|
if (untracked->dirs[i]->recurse)
|
|
|
|
value++;
|
|
|
|
intlen = encode_varint(value, intbuf);
|
|
|
|
strbuf_add(out, intbuf, intlen);
|
|
|
|
|
|
|
|
strbuf_add(out, untracked->name, strlen(untracked->name) + 1);
|
|
|
|
|
|
|
|
for (i = 0; i < untracked->untracked_nr; i++)
|
|
|
|
strbuf_add(out, untracked->untracked[i],
|
|
|
|
strlen(untracked->untracked[i]) + 1);
|
|
|
|
|
|
|
|
for (i = 0; i < untracked->dirs_nr; i++)
|
|
|
|
if (untracked->dirs[i]->recurse)
|
|
|
|
write_one_dir(untracked->dirs[i], wd);
|
|
|
|
}
|
|
|
|
|
|
|
|
void write_untracked_extension(struct strbuf *out, struct untracked_cache *untracked)
|
|
|
|
{
|
|
|
|
struct ondisk_untracked_cache *ouc;
|
|
|
|
struct write_data wd;
|
|
|
|
unsigned char varbuf[16];
|
2016-02-22 22:44:42 +00:00
|
|
|
int varint_len;
|
2019-02-19 00:05:23 +00:00
|
|
|
const unsigned hashsz = the_hash_algo->rawsz;
|
2016-02-22 22:44:42 +00:00
|
|
|
|
2021-03-13 16:17:22 +00:00
|
|
|
CALLOC_ARRAY(ouc, 1);
|
2015-03-08 10:12:33 +00:00
|
|
|
stat_data_to_disk(&ouc->info_exclude_stat, &untracked->ss_info_exclude.stat);
|
|
|
|
stat_data_to_disk(&ouc->excludes_file_stat, &untracked->ss_excludes_file.stat);
|
|
|
|
ouc->dir_flags = htonl(untracked->dir_flags);
|
2015-03-08 10:12:46 +00:00
|
|
|
|
|
|
|
varint_len = encode_varint(untracked->ident.len, varbuf);
|
|
|
|
strbuf_add(out, varbuf, varint_len);
|
2016-07-19 18:36:29 +00:00
|
|
|
strbuf_addbuf(out, &untracked->ident);
|
2015-03-08 10:12:46 +00:00
|
|
|
|
2019-02-19 00:05:23 +00:00
|
|
|
strbuf_add(out, ouc, sizeof(*ouc));
|
|
|
|
strbuf_add(out, untracked->ss_info_exclude.oid.hash, hashsz);
|
|
|
|
strbuf_add(out, untracked->ss_excludes_file.oid.hash, hashsz);
|
|
|
|
strbuf_add(out, untracked->exclude_per_dir, strlen(untracked->exclude_per_dir) + 1);
|
2017-06-15 23:15:46 +00:00
|
|
|
FREE_AND_NULL(ouc);
|
2015-03-08 10:12:33 +00:00
|
|
|
|
|
|
|
if (!untracked->root) {
|
|
|
|
varint_len = encode_varint(0, varbuf);
|
|
|
|
strbuf_add(out, varbuf, varint_len);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
wd.index = 0;
|
|
|
|
wd.check_only = ewah_new();
|
|
|
|
wd.valid = ewah_new();
|
|
|
|
wd.sha1_valid = ewah_new();
|
|
|
|
strbuf_init(&wd.out, 1024);
|
|
|
|
strbuf_init(&wd.sb_stat, 1024);
|
|
|
|
strbuf_init(&wd.sb_sha1, 1024);
|
|
|
|
write_one_dir(untracked->root, &wd);
|
|
|
|
|
|
|
|
varint_len = encode_varint(wd.index, varbuf);
|
|
|
|
strbuf_add(out, varbuf, varint_len);
|
|
|
|
strbuf_addbuf(out, &wd.out);
|
|
|
|
ewah_serialize_strbuf(wd.valid, out);
|
|
|
|
ewah_serialize_strbuf(wd.check_only, out);
|
|
|
|
ewah_serialize_strbuf(wd.sha1_valid, out);
|
|
|
|
strbuf_addbuf(out, &wd.sb_stat);
|
|
|
|
strbuf_addbuf(out, &wd.sb_sha1);
|
|
|
|
strbuf_addch(out, '\0'); /* safeguard for string lists */
|
|
|
|
|
|
|
|
ewah_free(wd.valid);
|
|
|
|
ewah_free(wd.check_only);
|
|
|
|
ewah_free(wd.sha1_valid);
|
|
|
|
strbuf_release(&wd.out);
|
|
|
|
strbuf_release(&wd.sb_stat);
|
|
|
|
strbuf_release(&wd.sb_sha1);
|
|
|
|
}
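The serialization order produced above, and consumed by read_untracked_extension() further down, can be read off the strbuf_add() calls; the following summary is a sketch of that layout, not an independent format specification.

/*
 * UNTR extension layout as written by write_untracked_extension():
 *
 *   varint   ident length, followed by the ident string
 *   struct ondisk_untracked_cache   (stat data + dir_flags, network byte order)
 *   hash     ss_info_exclude.oid
 *   hash     ss_excludes_file.oid
 *   string   exclude_per_dir, NUL-terminated
 *   varint   number of directory records (0 means "no root", stop here)
 *   per-dir records from write_one_dir():
 *            varint untracked_nr, varint count of recursing subdirs,
 *            NUL-terminated name, untracked_nr NUL-terminated entries,
 *            then the recursing subdirs, depth first
 *   ewah     valid bitmap, check_only bitmap, sha1_valid bitmap
 *   blob     stat_data per 'valid' dir, then one hash per 'sha1_valid' dir
 *   '\0'     trailing guard byte
 */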
|
2015-03-08 10:12:34 +00:00
|
|
|
|
|
|
|
static void free_untracked(struct untracked_cache_dir *ucd)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
if (!ucd)
|
|
|
|
return;
|
|
|
|
for (i = 0; i < ucd->dirs_nr; i++)
|
|
|
|
free_untracked(ucd->dirs[i]);
|
|
|
|
for (i = 0; i < ucd->untracked_nr; i++)
|
|
|
|
free(ucd->untracked[i]);
|
|
|
|
free(ucd->untracked);
|
|
|
|
free(ucd->dirs);
|
|
|
|
free(ucd);
|
|
|
|
}
|
|
|
|
|
|
|
|
void free_untracked_cache(struct untracked_cache *uc)
|
|
|
|
{
|
2022-11-08 18:17:41 +00:00
|
|
|
if (!uc)
|
|
|
|
return;
|
|
|
|
|
|
|
|
free(uc->exclude_per_dir_to_free);
|
|
|
|
strbuf_release(&uc->ident);
|
|
|
|
free_untracked(uc->root);
|
2015-03-08 10:12:34 +00:00
|
|
|
free(uc);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct read_data {
|
|
|
|
int index;
|
|
|
|
struct untracked_cache_dir **ucd;
|
|
|
|
struct ewah_bitmap *check_only;
|
|
|
|
struct ewah_bitmap *valid;
|
|
|
|
struct ewah_bitmap *sha1_valid;
|
|
|
|
const unsigned char *data;
|
|
|
|
const unsigned char *end;
|
|
|
|
};
|
|
|
|
|
2017-07-16 12:17:37 +00:00
|
|
|
static void stat_data_from_disk(struct stat_data *to, const unsigned char *data)
|
2015-03-08 10:12:34 +00:00
|
|
|
{
|
2017-07-16 12:17:37 +00:00
|
|
|
memcpy(to, data, sizeof(*to));
|
|
|
|
to->sd_ctime.sec = ntohl(to->sd_ctime.sec);
|
|
|
|
to->sd_ctime.nsec = ntohl(to->sd_ctime.nsec);
|
|
|
|
to->sd_mtime.sec = ntohl(to->sd_mtime.sec);
|
|
|
|
to->sd_mtime.nsec = ntohl(to->sd_mtime.nsec);
|
|
|
|
to->sd_dev = ntohl(to->sd_dev);
|
|
|
|
to->sd_ino = ntohl(to->sd_ino);
|
|
|
|
to->sd_uid = ntohl(to->sd_uid);
|
|
|
|
to->sd_gid = ntohl(to->sd_gid);
|
|
|
|
to->sd_size = ntohl(to->sd_size);
|
2015-03-08 10:12:34 +00:00
|
|
|
}
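stat_data_to_disk() above and stat_data_from_disk() here are inverses apart from byte order; a tiny self-check sketch (the helper is hypothetical and would have to live in this file, since both functions are static).

/* Sketch: round-trip stat data through the on-disk (big-endian) form. */
static void stat_data_roundtrip_check(const struct stat_data *in)
{
	struct stat_data ondisk, back;

	stat_data_to_disk(&ondisk, in);                              /* host -> network order */
	stat_data_from_disk(&back, (const unsigned char *)&ondisk);  /* network -> host order */

	if (memcmp(in, &back, sizeof(back)))
		BUG("stat_data round-trip mismatch");
}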
|
|
|
|
|
|
|
|
static int read_one_dir(struct untracked_cache_dir **untracked_,
|
|
|
|
struct read_data *rd)
|
|
|
|
{
|
|
|
|
struct untracked_cache_dir ud, *untracked;
|
2019-04-18 21:17:38 +00:00
|
|
|
const unsigned char *data = rd->data, *end = rd->end;
|
2019-04-18 21:17:02 +00:00
|
|
|
const unsigned char *eos;
|
2015-03-08 10:12:34 +00:00
|
|
|
unsigned int value;
|
2019-04-18 21:18:35 +00:00
|
|
|
int i;
|
2015-03-08 10:12:34 +00:00
|
|
|
|
|
|
|
memset(&ud, 0, sizeof(ud));
|
|
|
|
|
2019-04-18 21:17:38 +00:00
|
|
|
value = decode_varint(&data);
|
|
|
|
if (data > end)
|
2015-03-08 10:12:34 +00:00
|
|
|
return -1;
|
|
|
|
ud.recurse = 1;
|
|
|
|
ud.untracked_alloc = value;
|
|
|
|
ud.untracked_nr = value;
|
|
|
|
if (ud.untracked_nr)
|
2016-02-22 22:44:25 +00:00
|
|
|
ALLOC_ARRAY(ud.untracked, ud.untracked_nr);
|
2015-03-08 10:12:34 +00:00
|
|
|
|
2019-04-18 21:17:38 +00:00
|
|
|
ud.dirs_alloc = ud.dirs_nr = decode_varint(&data);
|
|
|
|
if (data > end)
|
2015-03-08 10:12:34 +00:00
|
|
|
return -1;
|
2016-02-22 22:44:25 +00:00
|
|
|
ALLOC_ARRAY(ud.dirs, ud.dirs_nr);
|
2015-03-08 10:12:34 +00:00
|
|
|
|
2019-04-18 21:17:02 +00:00
|
|
|
eos = memchr(data, '\0', end - data);
|
|
|
|
if (!eos || eos == end)
|
2015-03-08 10:12:34 +00:00
|
|
|
return -1;
|
2019-04-18 21:17:02 +00:00
|
|
|
|
2019-04-18 21:18:35 +00:00
|
|
|
*untracked_ = untracked = xmalloc(st_add3(sizeof(*untracked), eos - data, 1));
|
2015-03-08 10:12:34 +00:00
|
|
|
memcpy(untracked, &ud, sizeof(ud));
|
2019-04-18 21:18:35 +00:00
|
|
|
memcpy(untracked->name, data, eos - data + 1);
|
2019-04-18 21:17:38 +00:00
|
|
|
data = eos + 1;
|
2015-03-08 10:12:34 +00:00
|
|
|
|
|
|
|
for (i = 0; i < untracked->untracked_nr; i++) {
|
2019-04-18 21:17:02 +00:00
|
|
|
eos = memchr(data, '\0', end - data);
|
|
|
|
if (!eos || eos == end)
|
2015-03-08 10:12:34 +00:00
|
|
|
return -1;
|
2019-04-18 21:18:35 +00:00
|
|
|
untracked->untracked[i] = xmemdupz(data, eos - data);
|
2019-04-18 21:17:38 +00:00
|
|
|
data = eos + 1;
|
2015-03-08 10:12:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
rd->ucd[rd->index++] = untracked;
|
|
|
|
rd->data = data;
|
|
|
|
|
|
|
|
for (i = 0; i < untracked->dirs_nr; i++) {
|
2019-04-18 21:18:35 +00:00
|
|
|
if (read_one_dir(untracked->dirs + i, rd) < 0)
|
2015-03-08 10:12:34 +00:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void set_check_only(size_t pos, void *cb)
|
|
|
|
{
|
|
|
|
struct read_data *rd = cb;
|
|
|
|
struct untracked_cache_dir *ud = rd->ucd[pos];
|
|
|
|
ud->check_only = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void read_stat(size_t pos, void *cb)
|
|
|
|
{
|
|
|
|
struct read_data *rd = cb;
|
|
|
|
struct untracked_cache_dir *ud = rd->ucd[pos];
|
|
|
|
if (rd->data + sizeof(struct stat_data) > rd->end) {
|
|
|
|
rd->data = rd->end + 1;
|
|
|
|
return;
|
|
|
|
}
|
2017-07-16 12:17:37 +00:00
|
|
|
stat_data_from_disk(&ud->stat_data, rd->data);
|
2015-03-08 10:12:34 +00:00
|
|
|
rd->data += sizeof(struct stat_data);
|
|
|
|
ud->valid = 1;
|
|
|
|
}
|
|
|
|
|
2018-05-02 00:25:48 +00:00
|
|
|
static void read_oid(size_t pos, void *cb)
|
2015-03-08 10:12:34 +00:00
|
|
|
{
|
|
|
|
struct read_data *rd = cb;
|
|
|
|
struct untracked_cache_dir *ud = rd->ucd[pos];
|
2018-05-02 00:25:48 +00:00
|
|
|
if (rd->data + the_hash_algo->rawsz > rd->end) {
|
2015-03-08 10:12:34 +00:00
|
|
|
rd->data = rd->end + 1;
|
|
|
|
return;
|
|
|
|
}
|
2021-04-26 01:02:50 +00:00
|
|
|
oidread(&ud->exclude_oid, rd->data);
|
2018-05-02 00:25:48 +00:00
|
|
|
rd->data += the_hash_algo->rawsz;
|
2015-03-08 10:12:34 +00:00
|
|
|
}
|
|
|
|
|
2018-01-28 00:13:12 +00:00
|
|
|
static void load_oid_stat(struct oid_stat *oid_stat, const unsigned char *data,
|
|
|
|
const unsigned char *sha1)
|
2015-03-08 10:12:34 +00:00
|
|
|
{
|
2018-01-28 00:13:12 +00:00
|
|
|
stat_data_from_disk(&oid_stat->stat, data);
|
2021-04-26 01:02:50 +00:00
|
|
|
oidread(&oid_stat->oid, sha1);
|
2018-01-28 00:13:12 +00:00
|
|
|
oid_stat->valid = 1;
|
2015-03-08 10:12:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
struct untracked_cache *read_untracked_extension(const void *data, unsigned long sz)
|
|
|
|
{
|
|
|
|
struct untracked_cache *uc;
|
|
|
|
struct read_data rd;
|
|
|
|
const unsigned char *next = data, *end = (const unsigned char *)data + sz;
|
2015-03-08 10:12:46 +00:00
|
|
|
const char *ident;
|
2018-06-15 03:44:43 +00:00
|
|
|
int ident_len;
|
|
|
|
ssize_t len;
|
2017-07-16 12:17:37 +00:00
|
|
|
const char *exclude_per_dir;
|
2019-02-19 00:05:23 +00:00
|
|
|
const unsigned hashsz = the_hash_algo->rawsz;
|
|
|
|
const unsigned offset = sizeof(struct ondisk_untracked_cache);
|
|
|
|
const unsigned exclude_per_dir_offset = offset + 2 * hashsz;
|
2015-03-08 10:12:34 +00:00
|
|
|
|
|
|
|
if (sz <= 1 || end[-1] != '\0')
|
|
|
|
return NULL;
|
|
|
|
end--;
|
|
|
|
|
2015-03-08 10:12:46 +00:00
|
|
|
ident_len = decode_varint(&next);
|
|
|
|
if (next + ident_len > end)
|
|
|
|
return NULL;
|
|
|
|
ident = (const char *)next;
|
|
|
|
next += ident_len;
|
|
|
|
|
2019-02-19 00:05:23 +00:00
|
|
|
if (next + exclude_per_dir_offset + 1 > end)
|
2015-03-08 10:12:34 +00:00
|
|
|
return NULL;
|
|
|
|
|
2021-03-13 16:17:22 +00:00
|
|
|
CALLOC_ARRAY(uc, 1);
|
2015-03-08 10:12:46 +00:00
|
|
|
strbuf_init(&uc->ident, ident_len);
|
|
|
|
strbuf_add(&uc->ident, ident, ident_len);
|
2018-01-28 00:13:12 +00:00
|
|
|
load_oid_stat(&uc->ss_info_exclude,
|
|
|
|
next + ouc_offset(info_exclude_stat),
|
2019-02-19 00:05:23 +00:00
|
|
|
next + offset);
|
2018-01-28 00:13:12 +00:00
|
|
|
load_oid_stat(&uc->ss_excludes_file,
|
|
|
|
next + ouc_offset(excludes_file_stat),
|
2019-02-19 00:05:23 +00:00
|
|
|
next + offset + hashsz);
|
2017-07-16 12:17:37 +00:00
|
|
|
uc->dir_flags = get_be32(next + ouc_offset(dir_flags));
|
2019-02-19 00:05:23 +00:00
|
|
|
exclude_per_dir = (const char *)next + exclude_per_dir_offset;
|
2022-11-08 18:17:41 +00:00
|
|
|
uc->exclude_per_dir = uc->exclude_per_dir_to_free = xstrdup(exclude_per_dir);
|
2015-03-08 10:12:34 +00:00
|
|
|
/* NUL after exclude_per_dir is covered by sizeof(*ouc) */
|
2019-02-19 00:05:23 +00:00
|
|
|
next += exclude_per_dir_offset + strlen(exclude_per_dir) + 1;
|
2015-03-08 10:12:34 +00:00
|
|
|
if (next >= end)
|
|
|
|
goto done2;
|
|
|
|
|
|
|
|
len = decode_varint(&next);
|
|
|
|
if (next > end || len == 0)
|
|
|
|
goto done2;
|
|
|
|
|
|
|
|
rd.valid = ewah_new();
|
|
|
|
rd.check_only = ewah_new();
|
|
|
|
rd.sha1_valid = ewah_new();
|
|
|
|
rd.data = next;
|
|
|
|
rd.end = end;
|
|
|
|
rd.index = 0;
|
2016-02-22 22:44:25 +00:00
|
|
|
ALLOC_ARRAY(rd.ucd, len);
|
2015-03-08 10:12:34 +00:00
|
|
|
|
|
|
|
if (read_one_dir(&uc->root, &rd) || rd.index != len)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
next = rd.data;
|
|
|
|
len = ewah_read_mmap(rd.valid, next, end - next);
|
|
|
|
if (len < 0)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
next += len;
|
|
|
|
len = ewah_read_mmap(rd.check_only, next, end - next);
|
|
|
|
if (len < 0)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
next += len;
|
|
|
|
len = ewah_read_mmap(rd.sha1_valid, next, end - next);
|
|
|
|
if (len < 0)
|
|
|
|
goto done;
|
|
|
|
|
|
|
|
ewah_each_bit(rd.check_only, set_check_only, &rd);
|
|
|
|
rd.data = next + len;
|
|
|
|
ewah_each_bit(rd.valid, read_stat, &rd);
|
2018-05-02 00:25:48 +00:00
|
|
|
ewah_each_bit(rd.sha1_valid, read_oid, &rd);
|
2015-03-08 10:12:34 +00:00
|
|
|
next = rd.data;
|
|
|
|
|
|
|
|
done:
|
|
|
|
free(rd.ucd);
|
|
|
|
ewah_free(rd.valid);
|
|
|
|
ewah_free(rd.check_only);
|
|
|
|
ewah_free(rd.sha1_valid);
|
|
|
|
done2:
|
|
|
|
if (next != end) {
|
|
|
|
free_untracked_cache(uc);
|
|
|
|
uc = NULL;
|
|
|
|
}
|
|
|
|
return uc;
|
|
|
|
}
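Because the writer and the reader are symmetric, a round trip through a strbuf is a convenient way to reason about, or test, the extension format. A sketch, assuming both functions are visible to the caller; roundtrip_untracked() is a hypothetical helper.

#include "git-compat-util.h"
#include "dir.h"
#include "strbuf.h"

/* Sketch: serialize an untracked cache and parse it back. */
static struct untracked_cache *roundtrip_untracked(struct untracked_cache *uc)
{
	struct strbuf out = STRBUF_INIT;
	struct untracked_cache *copy;

	write_untracked_extension(&out, uc);
	copy = read_untracked_extension(out.buf, out.len);  /* NULL on malformed data */

	strbuf_release(&out);
	return copy;	/* caller owns it; release with free_untracked_cache() */
}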
|
2015-03-08 10:12:35 +00:00
|
|
|
|
2015-08-19 13:01:26 +00:00
|
|
|
static void invalidate_one_directory(struct untracked_cache *uc,
|
|
|
|
struct untracked_cache_dir *ucd)
|
|
|
|
{
|
|
|
|
uc->dir_invalidated++;
|
|
|
|
ucd->valid = 0;
|
|
|
|
ucd->untracked_nr = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Normally when an entry is added or removed from a directory,
|
|
|
|
* invalidating that directory is enough. No need to touch its
|
|
|
|
* ancestors. When a directory is shown as "foo/bar/" in git-status
|
|
|
|
* however, deleting or adding an entry may have cascading effect.
|
|
|
|
*
|
|
|
|
* Say the "foo/bar/file" has become untracked, we need to tell the
|
|
|
|
* untracked_cache_dir of "foo" that "bar/" is not an untracked
|
|
|
|
* directory any more (because "bar" is managed by foo as an untracked
|
|
|
|
* "file").
|
|
|
|
*
|
|
|
|
* Similarly, if "foo/bar/file" moves from untracked to tracked and it
|
|
|
|
* was the last untracked entry in the entire "foo", we should show
|
|
|
|
* "foo/" instead. Which means we have to invalidate past "bar" up to
|
|
|
|
* "foo".
|
|
|
|
*
|
|
|
|
* This function traverses all directories from root to leaf. If there
|
|
|
|
* is a chance of one of the above cases happening, we invalidate back
|
|
|
|
* to root. Otherwise we just invalidate the leaf. There may be a more
|
|
|
|
* sophisticated way than checking for SHOW_OTHER_DIRECTORIES to
|
|
|
|
* detect these cases and avoid unnecessary invalidation, for example,
|
|
|
|
* checking for the untracked entry named "bar/" in "foo", but for now
|
|
|
|
* stick to something safe and simple.
|
|
|
|
*/
|
|
|
|
static int invalidate_one_component(struct untracked_cache *uc,
|
|
|
|
struct untracked_cache_dir *dir,
|
|
|
|
const char *path, int len)
|
|
|
|
{
|
|
|
|
const char *rest = strchr(path, '/');
|
|
|
|
|
|
|
|
if (rest) {
|
|
|
|
int component_len = rest - path;
|
|
|
|
struct untracked_cache_dir *d =
|
|
|
|
lookup_untracked(uc, dir, path, component_len);
|
|
|
|
int ret =
|
|
|
|
invalidate_one_component(uc, d, rest + 1,
|
|
|
|
len - (component_len + 1));
|
|
|
|
if (ret)
|
|
|
|
invalidate_one_directory(uc, dir);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
invalidate_one_directory(uc, dir);
|
|
|
|
return uc->dir_flags & DIR_SHOW_OTHER_DIRECTORIES;
|
|
|
|
}
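A worked trace of the cascade described in the comment above; the paths and flag settings are illustrative.

/*
 * Example: untracked_cache_invalidate_path(istate, "foo/bar/file", 1)
 * when uc->dir_flags has DIR_SHOW_OTHER_DIRECTORIES set:
 *
 *   invalidate_one_component(uc, root, "foo/bar/file", 12)
 *     -> descends through "foo" and "bar" to the leaf component "file"
 *     -> the leaf call invalidates the "foo/bar" node and returns non-zero
 *        (SHOW_OTHER_DIRECTORIES means the change may ripple upwards)
 *     -> on the way back up, each caller therefore invalidates its own
 *        directory too, so "foo" and the root are invalidated as well
 *
 * Without DIR_SHOW_OTHER_DIRECTORIES only the "foo/bar" node is invalidated.
 */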
|
|
|
|
|
2015-03-08 10:12:35 +00:00
|
|
|
void untracked_cache_invalidate_path(struct index_state *istate,
|
dir.c: ignore paths containing .git when invalidating untracked cache
read_directory() code ignores all paths named ".git" even if it's not
a valid git repository. See treat_path() for details. Since ".git" is
basically invisible to read_directory(), when we are asked to
invalidate a path that contains ".git", we can safely ignore it
because the slow path would not consider it anyway.
This helps when fsmonitor is used and we have a real ".git" repo at
worktree top. Occasionally .git/index will be updated and if the
fsmonitor hook does not filter it, untracked cache is asked to
invalidate the path ".git/index".
Without this patch, we invalidate the root directory unnecessarily,
which:
- makes read_directory() fall back to slow path for root directory
(slower)
- makes the index dirty (because UNTR extension is updated). Depending
on the index size, writing it down could also be slow.
A note about the new "safe_path" knob. Since this new check could be
relatively expensive, avoid it when we know it's not needed. If the
path comes from the index, it can't contain ".git". If it somehow
does, we are broken at many more levels than just this one.
Noticed-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Nguyễn Thái Ngọc Duy <pclouds@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2018-02-07 09:21:40 +00:00
|
|
|
const char *path, int safe_path)
|
2015-03-08 10:12:35 +00:00
|
|
|
{
|
|
|
|
if (!istate->untracked || !istate->untracked->root)
|
|
|
|
return;
|
2018-05-22 05:25:26 +00:00
|
|
|
if (!safe_path && !verify_path(path, 0))
|
2018-02-07 09:21:40 +00:00
|
|
|
return;
|
2015-08-19 13:01:26 +00:00
|
|
|
invalidate_one_component(istate->untracked, istate->untracked->root,
|
|
|
|
path, strlen(path));
|
2015-03-08 10:12:35 +00:00
|
|
|
}
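A minimal usage sketch, not part of dir.c: how a hypothetical caller reacting to filesystem (e.g. fsmonitor) events might drive the invalidation above. The helper name and the assumption that the path comes straight from a watcher are illustrative only.

/*
 * Illustrative sketch only: "changed_path" is assumed to come from a
 * filesystem watcher, so it may name ".git" internals and must be
 * vetted (safe_path = 0). A path taken from the index could pass
 * safe_path = 1 and skip the verify_path() check.
 */
static void example_note_worktree_change(struct index_state *istate,
					 const char *changed_path)
{
	untracked_cache_invalidate_path(istate, changed_path, 0);
}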
|
|
|
|
|
2024-02-26 21:39:17 +00:00
|
|
|
void untracked_cache_invalidate_trimmed_path(struct index_state *istate,
|
|
|
|
const char *path,
|
|
|
|
int safe_path)
|
|
|
|
{
|
|
|
|
size_t len = strlen(path);
|
|
|
|
|
|
|
|
if (!len)
|
|
|
|
BUG("untracked_cache_invalidate_trimmed_path given zero length path");
|
|
|
|
|
|
|
|
if (path[len - 1] != '/') {
|
|
|
|
untracked_cache_invalidate_path(istate, path, safe_path);
|
|
|
|
} else {
|
|
|
|
struct strbuf tmp = STRBUF_INIT;
|
|
|
|
|
|
|
|
strbuf_add(&tmp, path, len - 1);
|
|
|
|
untracked_cache_invalidate_path(istate, tmp.buf, safe_path);
|
|
|
|
strbuf_release(&tmp);
|
|
|
|
}
|
|
|
|
}
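A small sketch of a hypothetical caller, not from dir.c: directory events are often reported with a trailing slash, and the trimmed variant strips it so both spellings below invalidate the same cached directory.

/*
 * Illustrative sketch only: both calls invalidate the cache entry for
 * "foo/bar"; the second has its trailing '/' stripped first.
 */
static void example_invalidate_dir(struct index_state *istate)
{
	untracked_cache_invalidate_path(istate, "foo/bar", 1);
	untracked_cache_invalidate_trimmed_path(istate, "foo/bar/", 1);
}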
|
|
|
|
|
2015-03-08 10:12:35 +00:00
|
|
|
void untracked_cache_remove_from_index(struct index_state *istate,
|
|
|
|
const char *path)
|
|
|
|
{
|
2018-02-07 09:21:40 +00:00
|
|
|
untracked_cache_invalidate_path(istate, path, 1);
|
2015-03-08 10:12:35 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void untracked_cache_add_to_index(struct index_state *istate,
|
|
|
|
const char *path)
|
|
|
|
{
|
2018-02-07 09:21:40 +00:00
|
|
|
untracked_cache_invalidate_path(istate, path, 1);
|
2015-03-08 10:12:35 +00:00
|
|
|
}
|
2016-12-12 19:04:34 +00:00
|
|
|
|
2018-03-28 22:35:31 +00:00
|
|
|
static void connect_wt_gitdir_in_nested(const char *sub_worktree,
|
|
|
|
const char *sub_gitdir)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct repository subrepo;
|
|
|
|
struct strbuf sub_wt = STRBUF_INIT;
|
|
|
|
struct strbuf sub_gd = STRBUF_INIT;
|
|
|
|
|
|
|
|
const struct submodule *sub;
|
|
|
|
|
|
|
|
/* If the submodule has no working tree, we can ignore it. */
|
|
|
|
if (repo_init(&subrepo, sub_gitdir, sub_worktree))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (repo_read_index(&subrepo) < 0)
|
2018-07-21 07:49:30 +00:00
|
|
|
die(_("index file corrupt in repo %s"), subrepo.gitdir);
|
2018-03-28 22:35:31 +00:00
|
|
|
|
2021-04-01 01:49:54 +00:00
|
|
|
/* TODO: audit for interaction with sparse-index. */
|
|
|
|
ensure_full_index(subrepo.index);
|
2018-03-28 22:35:31 +00:00
|
|
|
for (i = 0; i < subrepo.index->cache_nr; i++) {
|
|
|
|
const struct cache_entry *ce = subrepo.index->cache[i];
|
|
|
|
|
|
|
|
if (!S_ISGITLINK(ce->ce_mode))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
while (i + 1 < subrepo.index->cache_nr &&
|
|
|
|
!strcmp(ce->name, subrepo.index->cache[i + 1]->name))
|
|
|
|
/*
|
|
|
|
* Skip entries with the same name in different stages
|
|
|
|
* to make sure an entry is returned only once.
|
|
|
|
*/
|
|
|
|
i++;
|
|
|
|
|
2021-04-26 01:02:56 +00:00
|
|
|
sub = submodule_from_path(&subrepo, null_oid(), ce->name);
|
2018-03-28 22:35:31 +00:00
|
|
|
if (!sub || !is_submodule_active(&subrepo, ce->name))
|
|
|
|
/* .gitmodules broken or inactive sub */
|
|
|
|
continue;
|
|
|
|
|
|
|
|
strbuf_reset(&sub_wt);
|
|
|
|
strbuf_reset(&sub_gd);
|
|
|
|
strbuf_addf(&sub_wt, "%s/%s", sub_worktree, sub->path);
|
2021-09-15 18:59:19 +00:00
|
|
|
submodule_name_to_gitdir(&sub_gd, &subrepo, sub->name);
|
2018-03-28 22:35:31 +00:00
|
|
|
|
|
|
|
connect_work_tree_and_git_dir(sub_wt.buf, sub_gd.buf, 1);
|
|
|
|
}
|
|
|
|
strbuf_release(&sub_wt);
|
|
|
|
strbuf_release(&sub_gd);
|
|
|
|
repo_clear(&subrepo);
|
|
|
|
}
|
|
|
|
|
|
|
|
void connect_work_tree_and_git_dir(const char *work_tree_,
|
|
|
|
const char *git_dir_,
|
|
|
|
int recurse_into_nested)
|
2016-12-12 19:04:34 +00:00
|
|
|
{
|
2017-03-14 21:46:24 +00:00
|
|
|
struct strbuf gitfile_sb = STRBUF_INIT;
|
|
|
|
struct strbuf cfg_sb = STRBUF_INIT;
|
2016-12-12 19:04:34 +00:00
|
|
|
struct strbuf rel_path = STRBUF_INIT;
|
2017-03-14 21:46:24 +00:00
|
|
|
char *git_dir, *work_tree;
|
2016-12-12 19:04:34 +00:00
|
|
|
|
2017-03-14 21:46:24 +00:00
|
|
|
/* Prepare .git file */
|
|
|
|
strbuf_addf(&gitfile_sb, "%s/.git", work_tree_);
|
|
|
|
if (safe_create_leading_directories_const(gitfile_sb.buf))
|
|
|
|
die(_("could not create directories for %s"), gitfile_sb.buf);
|
|
|
|
|
|
|
|
/* Prepare config file */
|
|
|
|
strbuf_addf(&cfg_sb, "%s/config", git_dir_);
|
|
|
|
if (safe_create_leading_directories_const(cfg_sb.buf))
|
|
|
|
die(_("could not create directories for %s"), cfg_sb.buf);
|
2016-12-12 19:04:34 +00:00
|
|
|
|
2017-03-28 21:05:58 +00:00
|
|
|
git_dir = real_pathdup(git_dir_, 1);
|
|
|
|
work_tree = real_pathdup(work_tree_, 1);
|
2017-03-14 21:46:24 +00:00
|
|
|
|
|
|
|
/* Write .git file */
|
|
|
|
write_file(gitfile_sb.buf, "gitdir: %s",
|
|
|
|
relative_path(git_dir, work_tree, &rel_path));
|
2016-12-12 19:04:34 +00:00
|
|
|
/* Update core.worktree setting */
|
2017-03-14 21:46:24 +00:00
|
|
|
git_config_set_in_file(cfg_sb.buf, "core.worktree",
|
2016-12-12 19:04:34 +00:00
|
|
|
relative_path(work_tree, git_dir, &rel_path));
|
|
|
|
|
2017-03-14 21:46:24 +00:00
|
|
|
strbuf_release(&gitfile_sb);
|
|
|
|
strbuf_release(&cfg_sb);
|
2016-12-12 19:04:34 +00:00
|
|
|
strbuf_release(&rel_path);
|
2018-03-28 22:35:31 +00:00
|
|
|
|
|
|
|
if (recurse_into_nested)
|
|
|
|
connect_wt_gitdir_in_nested(work_tree, git_dir);
|
|
|
|
|
2016-12-12 19:04:34 +00:00
|
|
|
free(work_tree);
|
|
|
|
free(git_dir);
|
|
|
|
}
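A hedged sketch of a hypothetical call site; the paths below are made-up examples, not taken from a real run. It shows the effect of the function above: a ".git" gitfile in the worktree pointing at the git dir, and a relative core.worktree entry in that git dir's config.

/*
 * Illustrative sketch only; example paths are assumptions. After this
 * call, "sub/.git" contains a "gitdir: ..." line with the relative
 * path to ".git/modules/sub", and ".git/modules/sub/config" gains a
 * core.worktree entry pointing back at "sub". Passing 1 instead of 0
 * would also reconnect any submodules nested inside "sub".
 */
static void example_link_nested_checkout(void)
{
	connect_work_tree_and_git_dir("sub", ".git/modules/sub", 0);
}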
|
2016-12-12 19:04:35 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Migrate the git directory of the given path from old_git_dir to new_git_dir.
|
|
|
|
*/
|
|
|
|
void relocate_gitdir(const char *path, const char *old_git_dir, const char *new_git_dir)
|
|
|
|
{
|
|
|
|
if (rename(old_git_dir, new_git_dir) < 0)
|
|
|
|
die_errno(_("could not migrate git directory from '%s' to '%s'"),
|
|
|
|
old_git_dir, new_git_dir);
|
|
|
|
|
2018-03-28 22:35:31 +00:00
|
|
|
connect_work_tree_and_git_dir(path, new_git_dir, 0);
|
2016-12-12 19:04:35 +00:00
|
|
|
}
|
2022-05-16 20:10:59 +00:00
|
|
|
|
|
|
|
int path_match_flags(const char *const str, const enum path_match_flags flags)
|
|
|
|
{
|
|
|
|
const char *p = str;
|
|
|
|
|
|
|
|
if (flags & PATH_MATCH_NATIVE &&
|
|
|
|
flags & PATH_MATCH_XPLATFORM)
|
|
|
|
BUG("path_match_flags() must get one match kind, not multiple!");
|
|
|
|
else if (!(flags & PATH_MATCH_KINDS_MASK))
|
|
|
|
BUG("path_match_flags() must get at least one match kind!");
|
|
|
|
|
|
|
|
if (flags & PATH_MATCH_STARTS_WITH_DOT_SLASH &&
|
|
|
|
flags & PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH)
|
|
|
|
BUG("path_match_flags() must get one platform kind, not multiple!");
|
|
|
|
else if (!(flags & PATH_MATCH_PLATFORM_MASK))
|
|
|
|
BUG("path_match_flags() must get at least one platform kind!");
|
|
|
|
|
|
|
|
if (*p++ != '.')
|
|
|
|
return 0;
|
|
|
|
if (flags & PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH &&
|
|
|
|
*p++ != '.')
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (flags & PATH_MATCH_NATIVE)
|
|
|
|
return is_dir_sep(*p);
|
|
|
|
else if (flags & PATH_MATCH_XPLATFORM)
|
|
|
|
return is_xplatform_dir_sep(*p);
|
|
|
|
BUG("unreachable");
|
|
|
|
}
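A short usage sketch with a hypothetical helper, not part of dir.c: deciding whether user input explicitly names a local path by requiring a leading "./" or "../" with the platform's native directory separator.

/*
 * Illustrative sketch only: returns non-zero for "./x" and "../x"
 * (and, where is_dir_sep() accepts backslashes, ".\x" and "..\x").
 */
static int example_looks_like_local_path(const char *spec)
{
	return path_match_flags(spec, PATH_MATCH_STARTS_WITH_DOT_SLASH |
				      PATH_MATCH_NATIVE) ||
	       path_match_flags(spec, PATH_MATCH_STARTS_WITH_DOT_DOT_SLASH |
				      PATH_MATCH_NATIVE);
}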
|