#include "git-compat-util.h"
#include "run-command.h"
#include "environment.h"
#include "exec-cmd.h"
#include "gettext.h"
#include "sigchain.h"
#include "strvec.h"
#include "symlinks.h"
#include "thread-utils.h"
#include "strbuf.h"
#include "string-list.h"
#include "trace.h"
#include "trace2.h"
#include "quote.h"
#include "config.h"
#include "packfile.h"
#include "hook.h"
#include "compat/nonblock.h"
#include "alloc.h"
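
/*
 * Initialize a child_process to the same state as the static
 * CHILD_PROCESS_INIT initializer.
 */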
void child_process_init(struct child_process *child)
{
	struct child_process blank = CHILD_PROCESS_INIT;
	memcpy(child, &blank, sizeof(*child));
}
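
/*
 * Release the argument and environment lists of a child_process that
 * is no longer needed.
 */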
void child_process_clear(struct child_process *child)
{
	strvec_clear(&child->args);
	strvec_clear(&child->env);
}
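
/*
 * Children spawned with the "clean_on_exit" option are registered on
 * this list so that, if we exit abnormally (early return or signal),
 * they are killed rather than left to outlive us; see
 * mark_child_for_cleanup() and cleanup_children() below.
 */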
struct child_to_clean {
	pid_t pid;
	struct child_process *process;
	struct child_to_clean *next;
};
static struct child_to_clean *children_to_clean;
static int installed_child_cleanup_handler;
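
/*
 * Kill every child on the cleanup list. All children are signalled
 * first; only those marked "wait_after_clean" are reaped afterwards,
 * so that one child waiting on another cannot deadlock us. When called
 * from a signal handler (in_signal != 0), async-signal-unsafe work
 * such as free() and the clean_on_exit handlers is skipped.
 */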
static void cleanup_children(int sig, int in_signal)
{
	struct child_to_clean *children_to_wait_for = NULL;

	while (children_to_clean) {
		struct child_to_clean *p = children_to_clean;
		children_to_clean = p->next;

		if (p->process && !in_signal) {
			struct child_process *process = p->process;
			if (process->clean_on_exit_handler) {
				trace_printf(
					"trace: run_command: running exit handler for pid %"
					PRIuMAX, (uintmax_t)p->pid
				);
				process->clean_on_exit_handler(process);
			}
		}

		kill(p->pid, sig);

		if (p->process && p->process->wait_after_clean) {
			p->next = children_to_wait_for;
			children_to_wait_for = p;
		} else {
			if (!in_signal)
				free(p);
		}
	}

	while (children_to_wait_for) {
		struct child_to_clean *p = children_to_wait_for;
		children_to_wait_for = p->next;

		while (waitpid(p->pid, NULL, 0) < 0 && errno == EINTR)
			; /* spin waiting for process exit or error */

		if (!in_signal)
			free(p);
	}
}
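
/*
 * Signal handler: clean up the children, then re-raise the signal so
 * the previously installed handler (usually the default action) runs.
 */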
static void cleanup_children_on_signal(int sig)
{
	cleanup_children(sig, 1);
	sigchain_pop(sig);
	raise(sig);
}
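
/* atexit() handler: terminate any children marked for cleanup. */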
static void cleanup_children_on_exit(void)
{
	cleanup_children(SIGTERM, 0);
}
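
/*
 * Register a child so that cleanup_children() will kill it when we
 * exit or die from a signal. The atexit() and signal handlers are
 * installed lazily on the first call.
 */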
static void mark_child_for_cleanup(pid_t pid, struct child_process *process)
{
	struct child_to_clean *p = xmalloc(sizeof(*p));
	p->pid = pid;
	p->process = process;
	p->next = children_to_clean;
	children_to_clean = p;

	if (!installed_child_cleanup_handler) {
		atexit(cleanup_children_on_exit);
		sigchain_push_common(cleanup_children_on_signal);
		installed_child_cleanup_handler = 1;
	}
}
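
/*
 * Drop a child from the cleanup list, typically once it has been
 * reaped via wait_or_whine().
 */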
static void clear_child_for_cleanup(pid_t pid)
{
	struct child_to_clean **pp;

	for (pp = &children_to_clean; *pp; pp = &(*pp)->next) {
		struct child_to_clean *clean_me = *pp;

		if (clean_me->pid == pid) {
			*pp = clean_me->next;
			free(clean_me);
			return;
		}
	}
}

static inline void close_pair(int fd[2])
{
	close(fd[0]);
	close(fd[1]);
}
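
/*
 * Return non-zero if "name" is a regular file with the owner-execute
 * bit set (or, on Windows, something that looks executable), and 0
 * otherwise.
 */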
int is_executable(const char *name)
{
	struct stat st;

	if (stat(name, &st) || /* stat, not lstat */
	    !S_ISREG(st.st_mode))
		return 0;

#if defined(GIT_WINDOWS_NATIVE)
	/*
	 * On Windows there is no executable bit. The file extension
	 * indicates whether it can be run as an executable, and Git
	 * has special-handling to detect scripts and launch them
	 * through the indicated script interpreter. We test for the
	 * file extension first because virus scanners may make
	 * it quite expensive to open many files.
	 */
	if (ends_with(name, ".exe"))
		return S_IXUSR;

	{
		/*
		 * Now that we know it does not have an executable extension,
		 * peek into the file instead.
		 */
		char buf[3] = { 0 };
		int n;
		int fd = open(name, O_RDONLY);
		st.st_mode &= ~S_IXUSR;
		if (fd >= 0) {
			n = read(fd, buf, 2);
			if (n == 2)
				/* look for a she-bang */
				if (!strcmp(buf, "#!"))
					st.st_mode |= S_IXUSR;
			close(fd);
		}
	}
#endif
	return st.st_mode & S_IXUSR;
}

#ifndef locate_in_PATH
/*
 * Search $PATH for a command. This emulates the path search that
 * execvp would perform, without actually executing the command so it
 * can be used before fork() to prepare to run a command using
 * execve() or after execvp() to diagnose why it failed.
 *
 * The caller should ensure that file contains no directory
 * separators.
 *
 * Returns the path to the command, as found in $PATH or NULL if the
 * command could not be found. The caller inherits ownership of the
 * memory used to store the resultant path.
 *
 * This should not be used on Windows, where the $PATH search rules
 * are more complicated (e.g., a search for "foo" should find
 * "foo.exe").
 */
static char *locate_in_PATH(const char *file)
{
	const char *p = getenv("PATH");
	struct strbuf buf = STRBUF_INIT;

	if (!p || !*p)
		return NULL;

	while (1) {
		const char *end = strchrnul(p, ':');

		strbuf_reset(&buf);

		/* POSIX specifies an empty entry as the current directory. */
		if (end != p) {
			strbuf_add(&buf, p, end - p);
			strbuf_addch(&buf, '/');
		}
		strbuf_addstr(&buf, file);

		if (is_executable(buf.buf))
			return strbuf_detach(&buf, NULL);

		if (!*end)
			break;
		p = end + 1;
	}

	strbuf_release(&buf);
	return NULL;
}
#endif
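
/*
 * Return 1 if "command" can be found in $PATH, 0 otherwise. The result
 * of locate_in_PATH() is inspected before it is freed, so we never
 * rely on the value of a freed pointer.
 */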
int exists_in_PATH(const char *command)
{
	char *r = locate_in_PATH(command);
	int found = r != NULL;
	free(r);
	return found;
}
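
/*
 * Like execvp(), but with trace2 logging and friendlier errno values:
 * a command that cannot be found reports ENOENT even when an
 * unsearchable $PATH directory would otherwise yield EACCES or ENOTDIR.
 */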
int sane_execvp(const char *file, char * const argv[])
{
#ifndef GIT_WINDOWS_NATIVE
	/*
	 * execvp() doesn't return, so all we can do is tell trace2
	 * what we are about to do and let it leave a hint in the log
	 * (unless of course the execvp() fails).
	 *
	 * We skip this for Windows because the compat layer already
	 * has to emulate the execvp() call anyway.
	 */
	int exec_id = trace2_exec(file, (const char **)argv);
#endif

	if (!execvp(file, argv))
		return 0; /* cannot happen ;-) */

#ifndef GIT_WINDOWS_NATIVE
	{
		int ec = errno;
		trace2_exec_result(exec_id, ec);
		errno = ec;
	}
#endif

	/*
	 * When a command can't be found because one of the directories
	 * listed in $PATH is unsearchable, execvp reports EACCES, but
	 * careful usability testing (read: analysis of occasional bug
	 * reports) reveals that "No such file or directory" is more
	 * intuitive.
	 *
	 * We avoid commands with "/", because execvp will not do $PATH
	 * lookups in that case.
	 *
	 * The reassignment of EACCES to errno looks like a no-op below,
	 * but we need to protect against exists_in_PATH overwriting errno.
	 */
	if (errno == EACCES && !strchr(file, '/'))
		errno = exists_in_PATH(file) ? EACCES : ENOENT;
	else if (errno == ENOTDIR && !strchr(file, '/'))
		errno = ENOENT;
	return -1;
}
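
/*
 * Turn an argv into a command line suitable for handing to the shell:
 * if argv[0] contains shell metacharacters, it is run via "sh -c",
 * with any extra arguments forwarded through "$@".
 */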
static const char **prepare_shell_cmd(struct strvec *out, const char **argv)
{
	if (!argv[0])
		BUG("shell command is empty");

	if (strcspn(argv[0], "|&;<>()$`\\\"' \t\n*?[#~=%") != strlen(argv[0])) {
#ifndef GIT_WINDOWS_NATIVE
		strvec_push(out, SHELL_PATH);
#else
		strvec_push(out, "sh");
#endif
		strvec_push(out, "-c");

		/*
		 * If we have no extra arguments, we do not even need to
		 * bother with the "$@" magic.
		 */
		if (!argv[1])
			strvec_push(out, argv[0]);
		else
			strvec_pushf(out, "%s \"$@\"", argv[0]);
|
prepare_{git,shell}_cmd: use argv_array
These functions transform an existing argv into one suitable
for exec-ing or spawning via git or a shell. We can use an
argv_array in each to avoid dealing with manual counting and
allocation.
This also makes the memory allocation more clear and fixes
some leaks. In prepare_shell_cmd, we would sometimes
allocate a new string with "$@" in it and sometimes not,
meaning the caller could not correctly free it. On the
non-Windows side, we are in a child process which will
exec() or exit() immediately, so the leak isn't a big deal.
On Windows, though, we use spawn() from the parent process,
and leak a string for each shell command we run. On top of
that, the Windows code did not free the allocated argv array
at all (but does for the prepare_git_cmd case!).
By switching both of these functions to write into an
argv_array, we can consistently free the result as
appropriate.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-02-22 22:44:39 +00:00
|
|
|
}
|
2009-12-30 10:53:16 +00:00
|
|
|
|
2020-07-28 20:25:12 +00:00
|
|
|
strvec_pushv(out, argv);
|
2020-07-29 00:37:20 +00:00
|
|
|
return out->v;
|
2009-12-30 10:53:16 +00:00
|
|
|
}
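
/*
 * On POSIX systems, errors that occur in the child between fork() and
 * exec() are reported to the parent as a small struct child_err sent
 * over the child_notifier pipe; the child itself must not call
 * error()/die(), since those may allocate or use stdio after fork().
 */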
#ifndef GIT_WINDOWS_NATIVE
static int child_notifier = -1;

enum child_errcode {
	CHILD_ERR_CHDIR,
	CHILD_ERR_DUP2,
	CHILD_ERR_CLOSE,
	CHILD_ERR_SIGPROCMASK,
	CHILD_ERR_SILENT,
	CHILD_ERR_ERRNO
};

struct child_err {
	enum child_errcode err;
	int syserr; /* errno */
};

static void child_die(enum child_errcode err)
{
	struct child_err buf;

	buf.err = err;
	buf.syserr = errno;

	/* write(2) on buf smaller than PIPE_BUF (min 512) is atomic: */
	xwrite(child_notifier, &buf, sizeof(buf));
	_exit(1);
}

static void child_dup2(int fd, int to)
{
	if (dup2(fd, to) < 0)
		child_die(CHILD_ERR_DUP2);
}

static void child_close(int fd)
{
	if (close(fd))
		child_die(CHILD_ERR_CLOSE);
}

static void child_close_pair(int fd[2])
{
	child_close(fd[0]);
	child_close(fd[1]);
}
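
/*
 * Stub reporting routines intended for the forked child: each writes a
 * fixed message with xwrite() only, so no error path can pull malloc()
 * or stdio into the window between fork() and exec().
 */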
static void child_error_fn(const char *err UNUSED, va_list params UNUSED)
{
	const char msg[] = "error() should not be called in child\n";
	xwrite(2, msg, sizeof(msg) - 1);
}

static void child_warn_fn(const char *err UNUSED, va_list params UNUSED)
{
	const char msg[] = "warn() should not be called in child\n";
	xwrite(2, msg, sizeof(msg) - 1);
}

static void NORETURN child_die_fn(const char *err UNUSED, va_list params UNUSED)
{
	const char msg[] = "die() should not be called in child\n";
	xwrite(2, msg, sizeof(msg) - 1);
	_exit(2);
}

/* this runs in the parent process */
static void child_err_spew(struct child_process *cmd, struct child_err *cerr)
{
	static void (*old_errfn)(const char *err, va_list params);
	report_fn die_message_routine = get_die_message_routine();

	old_errfn = get_error_routine();
	set_error_routine(die_message_routine);
	errno = cerr->syserr;

	switch (cerr->err) {
	case CHILD_ERR_CHDIR:
		error_errno("exec '%s': cd to '%s' failed",
			    cmd->args.v[0], cmd->dir);
		break;
	case CHILD_ERR_DUP2:
		error_errno("dup2() in child failed");
		break;
	case CHILD_ERR_CLOSE:
		error_errno("close() in child failed");
		break;
	case CHILD_ERR_SIGPROCMASK:
		error_errno("sigprocmask failed restoring signals");
		break;
	case CHILD_ERR_SILENT:
		break;
	case CHILD_ERR_ERRNO:
		error_errno("cannot exec '%s'", cmd->args.v[0]);
		break;
	}
	set_error_routine(old_errfn);
}
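
/*
 * Flatten a child_process into the argv we will exec: out->v[0] is
 * SHELL_PATH (used only as a fallback if exec fails with ENOEXEC) and
 * out->v[1] is the command itself, resolved via $PATH when it contains
 * no directory separator.  Returns -1 with errno set to ENOENT when the
 * command cannot be found.
 */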
static int prepare_cmd(struct strvec *out, const struct child_process *cmd)
{
	if (!cmd->args.v[0])
		BUG("command is empty");

	/*
	 * Add SHELL_PATH so in the event exec fails with ENOEXEC we can
	 * attempt to interpret the command with 'sh'.
	 */
	strvec_push(out, SHELL_PATH);

	if (cmd->git_cmd) {
		prepare_git_cmd(out, cmd->args.v);
	} else if (cmd->use_shell) {
		prepare_shell_cmd(out, cmd->args.v);
	} else {
		strvec_pushv(out, cmd->args.v);
	}

	/*
	 * If there are no dir separator characters in the command then perform
	 * a path lookup and use the resolved path as the command to exec. If
	 * there are dir separator characters, we have exec attempt to invoke
	 * the command directly.
	 */
	if (!has_dir_sep(out->v[1])) {
		char *program = locate_in_PATH(out->v[1]);
		if (program) {
			free((char *)out->v[1]);
			out->v[1] = program;
		} else {
			strvec_clear(out);
			errno = ENOENT;
			return -1;
		}
	}

	return 0;
}
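
/*
 * Build the child's environment: start from the current environ, then
 * apply 'deltaenv', where "key=value" entries add or replace variables
 * and bare "key" entries remove them.  The returned array is allocated
 * here; the strings it points to are borrowed from environ/deltaenv.
 */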
static char **prep_childenv(const char *const *deltaenv)
{
	extern char **environ;
	char **childenv;
	struct string_list env = STRING_LIST_INIT_DUP;
	struct strbuf key = STRBUF_INIT;
	const char *const *p;
	int i;

	/* Construct a sorted string list consisting of the current environ */
	for (p = (const char *const *) environ; p && *p; p++) {
		const char *equals = strchr(*p, '=');

		if (equals) {
			strbuf_reset(&key);
			strbuf_add(&key, *p, equals - *p);
			string_list_append(&env, key.buf)->util = (void *) *p;
		} else {
			string_list_append(&env, *p)->util = (void *) *p;
		}
	}
	string_list_sort(&env);

	/* Merge in 'deltaenv' with the current environ */
	for (p = deltaenv; p && *p; p++) {
		const char *equals = strchr(*p, '=');

		if (equals) {
			/* ('key=value'), insert or replace entry */
			strbuf_reset(&key);
			strbuf_add(&key, *p, equals - *p);
			string_list_insert(&env, key.buf)->util = (void *) *p;
		} else {
			/* otherwise ('key') remove existing entry */
			string_list_remove(&env, *p, 0);
		}
	}

	/* Create an array of 'char *' to be used as the childenv */
	ALLOC_ARRAY(childenv, env.nr + 1);
	for (i = 0; i < env.nr; i++)
		childenv[i] = env.items[i].util;
	childenv[env.nr] = NULL;

	string_list_clear(&env, 0);
	strbuf_release(&key);
	return childenv;
}
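
/*
 * Block all signals (and, with pthreads, disable cancellation) around
 * fork() so the child starts with a predictable signal state;
 * atfork_parent() restores the caller's mask afterwards.
 */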
struct atfork_state {
#ifndef NO_PTHREADS
	int cs;
#endif
	sigset_t old;
};

#define CHECK_BUG(err, msg) \
	do { \
		int e = (err); \
		if (e) \
			BUG("%s: %s", msg, strerror(e)); \
	} while(0)

static void atfork_prepare(struct atfork_state *as)
{
	sigset_t all;

	if (sigfillset(&all))
		die_errno("sigfillset");
#ifdef NO_PTHREADS
	if (sigprocmask(SIG_SETMASK, &all, &as->old))
		die_errno("sigprocmask");
#else
	CHECK_BUG(pthread_sigmask(SIG_SETMASK, &all, &as->old),
		"blocking all signals");
	CHECK_BUG(pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &as->cs),
		"disabling cancellation");
#endif
}

static void atfork_parent(struct atfork_state *as)
{
#ifdef NO_PTHREADS
	if (sigprocmask(SIG_SETMASK, &as->old, NULL))
		die_errno("sigprocmask");
#else
	CHECK_BUG(pthread_setcancelstate(as->cs, NULL),
		"re-enabling cancellation");
	CHECK_BUG(pthread_sigmask(SIG_SETMASK, &as->old, NULL),
		"restoring signal mask");
#endif
}
#endif /* GIT_WINDOWS_NATIVE */
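
/* Best effort: mark the descriptor close-on-exec, ignoring fcntl() errors. */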
static inline void set_cloexec(int fd)
{
	int flags = fcntl(fd, F_GETFD);
	if (flags >= 0)
		fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
}
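
/*
 * Wait for 'pid' and translate its wait status into an exit code:
 * WEXITSTATUS() for a normal exit, or signal number + 128 when the
 * child died from a signal (matching what a POSIX shell would report).
 * With 'in_signal' set we are running inside a signal handler, so any
 * reporting or cleanup that is not async-signal-safe is skipped.
 */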
static int wait_or_whine(pid_t pid, const char *argv0, int in_signal)
{
	int status, code = -1;
	pid_t waiting;
	int failed_errno = 0;

	while ((waiting = waitpid(pid, &status, 0)) < 0 && errno == EINTR)
		; /* nothing */

	if (waiting < 0) {
		failed_errno = errno;
		if (!in_signal)
			error_errno("waitpid for %s failed", argv0);
	} else if (waiting != pid) {
		if (!in_signal)
			error("waitpid is confused (%s)", argv0);
	} else if (WIFSIGNALED(status)) {
		code = WTERMSIG(status);
		if (!in_signal && code != SIGINT && code != SIGQUIT && code != SIGPIPE)
			error("%s died of signal %d", argv0, code);
		/*
		 * This return value is chosen so that code & 0xff
		 * mimics the exit code that a POSIX shell would report for
		 * a program that died from this signal.
		 */
		code += 128;
	} else if (WIFEXITED(status)) {
		code = WEXITSTATUS(status);
	} else {
		if (!in_signal)
			error("waitpid is confused (%s)", argv0);
	}

	if (!in_signal)
		clear_child_for_cleanup(pid);

	errno = failed_errno;
	return code;
}
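
/*
 * Append a human-readable rendering of 'deltaenv' to 'dst' for trace
 * output: an "unset X Y;" clause for removed variables, followed by
 * "A=B C=D" for variables that differ from the current environment.
 */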
static void trace_add_env(struct strbuf *dst, const char *const *deltaenv)
{
	struct string_list envs = STRING_LIST_INIT_DUP;
	const char *const *e;
	int i;
	int printed_unset = 0;

	/* Last one wins, see run-command.c:prep_childenv() for context */
	for (e = deltaenv; e && *e; e++) {
		struct strbuf key = STRBUF_INIT;
		char *equals = strchr(*e, '=');

		if (equals) {
			strbuf_add(&key, *e, equals - *e);
			string_list_insert(&envs, key.buf)->util = equals + 1;
		} else {
			string_list_insert(&envs, *e)->util = NULL;
		}
		strbuf_release(&key);
	}

	/* "unset X Y...;" */
	for (i = 0; i < envs.nr; i++) {
		const char *var = envs.items[i].string;
		const char *val = envs.items[i].util;

		if (val || !getenv(var))
			continue;

		if (!printed_unset) {
			strbuf_addstr(dst, " unset");
			printed_unset = 1;
		}
		strbuf_addf(dst, " %s", var);
	}
	if (printed_unset)
		strbuf_addch(dst, ';');

	/* ... followed by "A=B C=D ..." */
	for (i = 0; i < envs.nr; i++) {
		const char *var = envs.items[i].string;
		const char *val = envs.items[i].util;
		const char *oldval;

		if (!val)
			continue;

		oldval = getenv(var);
		if (oldval && !strcmp(val, oldval))
			continue;

		strbuf_addf(dst, " %s=", var);
		sq_quote_buf_pretty(dst, val);
	}
	string_list_clear(&envs, 0);
}
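
/*
 * Emit a "trace: run_command: ..." line showing the working directory,
 * environment tweaks and the quoted command when tracing is enabled.
 */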
static void trace_run_command(const struct child_process *cp)
{
	struct strbuf buf = STRBUF_INIT;

	if (!trace_want(&trace_default_key))
		return;

	strbuf_addstr(&buf, "trace: run_command:");
	if (cp->dir) {
		strbuf_addstr(&buf, " cd ");
		sq_quote_buf_pretty(&buf, cp->dir);
		strbuf_addch(&buf, ';');
	}
	trace_add_env(&buf, cp->env.v);
	if (cp->git_cmd)
		strbuf_addstr(&buf, " git");
	sq_quote_argv_pretty(&buf, cp->args.v);

	trace_printf("%s", buf.buf);
	strbuf_release(&buf);
}
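
/*
 * Start the child described by 'cmd' without waiting for it to finish.
 * A typical caller (illustrative sketch only; see run-command.h for the
 * full API) uses the run_command() wrapper, which pairs start_command()
 * with finish_command():
 *
 *	struct child_process cp = CHILD_PROCESS_INIT;
 *	strvec_pushl(&cp.args, "git", "status", NULL);
 *	if (run_command(&cp))
 *		die("git status failed");
 */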
|
|
|
|
|
2007-03-10 08:28:05 +00:00
|
|
|
int start_command(struct child_process *cmd)
{
	int need_in, need_out, need_err;
	int fdin[2], fdout[2], fderr[2];
	int failed_errno;
	char *str;

	/*
	 * In case of errors we must keep the promise to close FDs
	 * that have been passed in via ->in and ->out.
	 */

	need_in = !cmd->no_stdin && cmd->in < 0;
	if (need_in) {
		if (pipe(fdin) < 0) {
			failed_errno = errno;
			if (cmd->out > 0)
				close(cmd->out);
			str = "standard input";
			goto fail_pipe;
		}
		cmd->in = fdin[1];
	}

	need_out = !cmd->no_stdout
		&& !cmd->stdout_to_stderr
		&& cmd->out < 0;
	if (need_out) {
		if (pipe(fdout) < 0) {
			failed_errno = errno;
			if (need_in)
				close_pair(fdin);
			else if (cmd->in)
				close(cmd->in);
			str = "standard output";
			goto fail_pipe;
		}
		cmd->out = fdout[0];
	}

	need_err = !cmd->no_stderr && cmd->err < 0;
	if (need_err) {
		if (pipe(fderr) < 0) {
			failed_errno = errno;
			if (need_in)
				close_pair(fdin);
			else if (cmd->in)
				close(cmd->in);
			if (need_out)
				close_pair(fdout);
			else if (cmd->out)
				close(cmd->out);
			str = "standard error";
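			/*
			 * The fail_pipe label lives inside the last pipe()
			 * setup block, but all three pipe-creation failures
			 * above jump here so the error is reported once with
			 * the right stream name in "str".
			 */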
fail_pipe:
			error("cannot create %s pipe for %s: %s",
			      str, cmd->args.v[0], strerror(failed_errno));
			child_process_clear(cmd);
			errno = failed_errno;
			return -1;
		}
		cmd->err = fderr[0];
	}

	trace2_child_start(cmd);
	trace_run_command(cmd);

	fflush(NULL);

	if (cmd->close_object_store)
		close_object_store(the_repository->objects);

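	/*
	 * Spawn the child: on POSIX systems via fork() and execve() below,
	 * on native Windows via mingw_spawnvpe() in the #else branch.
	 */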
#ifndef GIT_WINDOWS_NATIVE
{
	int notify_pipe[2];
	int null_fd = -1;
	char **childenv;
	struct strvec argv = STRVEC_INIT;
	struct child_err cerr;
	struct atfork_state as;

	if (prepare_cmd(&argv, cmd) < 0) {
		failed_errno = errno;
		cmd->pid = -1;
		if (!cmd->silent_exec_failure)
			error_errno("cannot run %s", cmd->args.v[0]);
		goto end_of_spawn;
	}

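	/*
	 * The child reports a failure between fork() and exec() back to the
	 * parent through this pipe (see the child_err handling further down).
	 */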
	if (pipe(notify_pipe))
		notify_pipe[0] = notify_pipe[1] = -1;

	if (cmd->no_stdin || cmd->no_stdout || cmd->no_stderr) {
		null_fd = xopen("/dev/null", O_RDWR | O_CLOEXEC);
		set_cloexec(null_fd);
	}

	childenv = prep_childenv(cmd->env.v);
	atfork_prepare(&as);

	/*
	 * NOTE: In order to prevent deadlocking when using threads special
	 * care should be taken with the function calls made in between the
	 * fork() and exec() calls. No calls should be made to functions which
	 * require acquiring a lock (e.g. malloc) as the lock could have been
	 * held by another thread at the time of forking, causing the lock to
	 * never be released in the child process. This means only
	 * Async-Signal-Safe functions are permitted in the child.
	 */
	cmd->pid = fork();
	failed_errno = errno;
	if (!cmd->pid) {
		int sig;
		/*
		 * Ensure the default die/error/warn routines do not get
		 * called, they can take stdio locks and malloc.
		 */
		set_die_routine(child_die_fn);
		set_error_routine(child_error_fn);
		set_warn_routine(child_warn_fn);

		close(notify_pipe[0]);
		set_cloexec(notify_pipe[1]);
		child_notifier = notify_pipe[1];

		if (cmd->no_stdin)
			child_dup2(null_fd, 0);
		else if (need_in) {
			child_dup2(fdin[0], 0);
			child_close_pair(fdin);
		} else if (cmd->in) {
			child_dup2(cmd->in, 0);
			child_close(cmd->in);
		}

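		/*
		 * Stderr is redirected before stdout so that the
		 * stdout_to_stderr case below duplicates the already
		 * redirected stderr.
		 */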
		if (cmd->no_stderr)
			child_dup2(null_fd, 2);
		else if (need_err) {
			child_dup2(fderr[1], 2);
			child_close_pair(fderr);
		} else if (cmd->err > 1) {
			child_dup2(cmd->err, 2);
			child_close(cmd->err);
		}

		if (cmd->no_stdout)
			child_dup2(null_fd, 1);
		else if (cmd->stdout_to_stderr)
			child_dup2(2, 1);
		else if (need_out) {
			child_dup2(fdout[1], 1);
			child_close_pair(fdout);
		} else if (cmd->out > 1) {
			child_dup2(cmd->out, 1);
			child_close(cmd->out);
		}

		if (cmd->dir && chdir(cmd->dir))
			child_die(CHILD_ERR_CHDIR);

		/*
		 * restore default signal handlers here, in case
		 * we catch a signal right before execve below
		 */
		for (sig = 1; sig < NSIG; sig++) {
			/* ignored signals get reset to SIG_DFL on execve */
			if (signal(sig, SIG_DFL) == SIG_IGN)
				signal(sig, SIG_IGN);
		}

		if (sigprocmask(SIG_SETMASK, &as.old, NULL) != 0)
			child_die(CHILD_ERR_SIGPROCMASK);

		/*
		 * Attempt to exec using the command and arguments starting at
		 * argv.argv[1]. argv.argv[0] contains SHELL_PATH which will
		 * be used in the event exec failed with ENOEXEC at which point
		 * we will try to interpret the command using 'sh'.
		 */
		execve(argv.v[1], (char *const *) argv.v + 1,
		       (char *const *) childenv);
		if (errno == ENOEXEC)
			execve(argv.v[0], (char *const *) argv.v,
			       (char *const *) childenv);

		if (cmd->silent_exec_failure && errno == ENOENT)
			child_die(CHILD_ERR_SILENT);
		child_die(CHILD_ERR_ERRNO);
	}
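	/*
	 * Back in the parent: undo the atfork_prepare() state and register
	 * the child for cleanup if the caller asked for clean_on_exit.
	 */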
	atfork_parent(&as);
	if (cmd->pid < 0)
		error_errno("cannot fork() for %s", cmd->args.v[0]);
	else if (cmd->clean_on_exit)
		mark_child_for_cleanup(cmd->pid, cmd);

	/*
	 * Wait for child's exec. If the exec succeeds (or if fork()
	 * failed), EOF is seen immediately by the parent. Otherwise, the
	 * child process sends a child_err struct.
	 * Note that use of this infrastructure is completely advisory,
	 * therefore, we keep error checks minimal.
	 */
	close(notify_pipe[1]);
	if (xread(notify_pipe[0], &cerr, sizeof(cerr)) == sizeof(cerr)) {
		/*
		 * At this point we know that fork() succeeded, but exec()
		 * failed. Errors have been reported to our stderr.
		 */
		wait_or_whine(cmd->pid, cmd->args.v[0], 0);
		child_err_spew(cmd, &cerr);
		failed_errno = errno;
		cmd->pid = -1;
	}
	close(notify_pipe[0]);

	if (null_fd >= 0)
		close(null_fd);
	strvec_clear(&argv);
	free(childenv);
}
end_of_spawn:

#else
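/*
 * Native Windows has no fork(); the child is spawned directly with
 * mingw_spawnvpe(), so stdin/stdout/stderr are prepared up front as
 * file handles that the spawned process inherits.
 */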
{
	int fhin = 0, fhout = 1, fherr = 2;
	const char **sargv = cmd->args.v;
	struct strvec nargv = STRVEC_INIT;

	if (cmd->no_stdin)
		fhin = open("/dev/null", O_RDWR);
	else if (need_in)
		fhin = dup(fdin[0]);
	else if (cmd->in)
		fhin = dup(cmd->in);

	if (cmd->no_stderr)
		fherr = open("/dev/null", O_RDWR);
	else if (need_err)
		fherr = dup(fderr[1]);
	else if (cmd->err > 2)
		fherr = dup(cmd->err);

	if (cmd->no_stdout)
		fhout = open("/dev/null", O_RDWR);
	else if (cmd->stdout_to_stderr)
		fhout = dup(fherr);
	else if (need_out)
		fhout = dup(fdout[1]);
	else if (cmd->out > 1)
		fhout = dup(cmd->out);

	if (cmd->git_cmd)
		cmd->args.v = prepare_git_cmd(&nargv, sargv);
	else if (cmd->use_shell)
		cmd->args.v = prepare_shell_cmd(&nargv, sargv);

	cmd->pid = mingw_spawnvpe(cmd->args.v[0], cmd->args.v,
				  (char**) cmd->env.v,
				  cmd->dir, fhin, fhout, fherr);
	failed_errno = errno;
	if (cmd->pid < 0 && (!cmd->silent_exec_failure || errno != ENOENT))
		error_errno("cannot spawn %s", cmd->args.v[0]);
	if (cmd->clean_on_exit && cmd->pid >= 0)
		mark_child_for_cleanup(cmd->pid, cmd);

	strvec_clear(&nargv);
	cmd->args.v = sargv;
	if (fhin != 0)
		close(fhin);
	if (fhout != 1)
		close(fhout);
	if (fherr != 2)
		close(fherr);
}
#endif

	if (cmd->pid < 0) {
		trace2_child_exit(cmd, -1);

		if (need_in)
			close_pair(fdin);
		else if (cmd->in)
			close(cmd->in);
		if (need_out)
			close_pair(fdout);
		else if (cmd->out)
			close(cmd->out);
		if (need_err)
			close_pair(fderr);
		else if (cmd->err)
			close(cmd->err);
		child_process_clear(cmd);
		errno = failed_errno;
		return -1;
	}

	if (need_in)
		close(fdin[0]);
	else if (cmd->in)
		close(cmd->in);

	if (need_out)
		close(fdout[1]);
	else if (cmd->out)
		close(cmd->out);

	if (need_err)
		close(fderr[1]);
	else if (cmd->err)
		close(cmd->err);

	return 0;
}

int finish_command(struct child_process *cmd)
|
|
|
|
{
|
run-command API: remove "argv" member, always use "args"
Remove the "argv" member from the run-command API, ever since "args"
was added in c460c0ecdca (run-command: store an optional argv_array,
2014-05-15) being able to provide either "argv" or "args" has led to
some confusion and bugs.
If we hadn't gone in that direction and only had an "argv" our
problems wouldn't have been solved either, as noted in [1] (and in the
documentation amended here) it comes with inherent memory management
issues: The caller would have to hang on to the "argv" until the
run-command API was finished. If the "argv" was an argument to main()
this wasn't an issue, but if it it was manually constructed using the
API might be painful.
We also have a recent report[2] of a user of the API segfaulting,
which is a direct result of it being complex to use. This commit
addresses the root cause of that bug.
This change is larger than I'd like, but there's no easy way to avoid
it that wouldn't involve even more verbose intermediate steps. We use
the "argv" as the source of truth over the "args", so we need to
change all parts of run-command.[ch] itself, as well as the trace2
logging at the same time.
The resulting Windows-specific code in start_command() is a bit nasty,
as we're now assigning to a strvec's "v" member, instead of to our own
"argv". There was a suggestion of some alternate approaches in reply
to an earlier version of this commit[3], but let's leave a larger
refactoring of this code for now.
1. http://lore.kernel.org/git/YT6BnnXeAWn8BycF@coredump.intra.peff.net
2. https://lore.kernel.org/git/20211120194048.12125-1-ematsumiya@suse.de/
3. https://lore.kernel.org/git/patch-5.5-ea1011f7473-20211122T153605Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-11-25 22:52:22 +00:00
|
|
|
int ret = wait_or_whine(cmd->pid, cmd->args.v[0], 0);
|
2019-02-22 22:25:01 +00:00
|
|
|
trace2_child_exit(cmd, ret);
|
2015-10-24 12:11:27 +00:00
|
|
|
child_process_clear(cmd);
|
2021-02-02 21:09:52 +00:00
|
|
|
invalidate_lstat_cache();
|
2014-05-15 08:33:26 +00:00
|
|
|
return ret;
|
2007-10-19 19:48:00 +00:00
|
|
|
}
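A short usage sketch of the calling convention that the "remove "argv", always use "args"" change above settles on: the command line lives in the strvec owned by the child_process, so there is no caller-managed argv to keep alive. Illustrative only; it assumes the declarations in run-command.h.
static void show_head_oneline(void)
{
	struct child_process cp = CHILD_PROCESS_INIT;

	strvec_pushl(&cp.args, "git", "log", "-1", "--oneline", NULL);
	if (run_command(&cp))
		die("git log failed");
	/* finish_command(), called via run_command(), cleared cp for us */
}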
|
|
|
|
|
pager: don't use unsafe functions in signal handlers
Since the commit a3da8821208d (pager: do wait_for_pager on signal
death), we call wait_for_pager() in the pager's signal handler. The
recent bug report revealed that this causes a deadlock in glibc when
aborting "git log" [*1*]. When this happens, the git process is left
unterminated, and it can't be killed by SIGTERM but only by SIGKILL.
The problem is that wait_for_pager() does more than wait for the pager
process to terminate: it also performs cleanups and prints errors.
Unfortunately, the functions that may be used in a signal handler are
very limited [*2*]. In particular, malloc(), free() and their variants
can't be used in a signal handler because they take a mutex internally
in glibc. This was the cause of the deadlock above. Besides the direct
calls of malloc/free, many functions that call malloc/free can't be
used either; strerror() is one of them.
Using fflush() and printf() in a signal handler is also bad, although
it has seemed to work so far. To be on the safe side, we should avoid
them, too.
This patch tries to reduce the calls of such functions in signal
handlers. wait_for_signal() takes a flag and avoids the unsafe
calls. Also, finish_command_in_signal() is introduced for the same
reason. There, the free() calls are removed, and it only waits for
the children without whining about errors.
[*1*] https://bugzilla.opensuse.org/show_bug.cgi?id=942297
[*2*] http://pubs.opengroup.org/onlinepubs/9699919799/functions/V2_chap02.html#tag_15_04_03
Signed-off-by: Takashi Iwai <tiwai@suse.de>
Reviewed-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-09-04 09:35:57 +00:00
|
|
|
int finish_command_in_signal(struct child_process *cmd)
|
|
|
|
{
|
run-command API: remove "argv" member, always use "args"
Remove the "argv" member from the run-command API, ever since "args"
was added in c460c0ecdca (run-command: store an optional argv_array,
2014-05-15) being able to provide either "argv" or "args" has led to
some confusion and bugs.
If we hadn't gone in that direction and only had an "argv" our
problems wouldn't have been solved either, as noted in [1] (and in the
documentation amended here) it comes with inherent memory management
issues: The caller would have to hang on to the "argv" until the
run-command API was finished. If the "argv" was an argument to main()
this wasn't an issue, but if it it was manually constructed using the
API might be painful.
We also have a recent report[2] of a user of the API segfaulting,
which is a direct result of it being complex to use. This commit
addresses the root cause of that bug.
This change is larger than I'd like, but there's no easy way to avoid
it that wouldn't involve even more verbose intermediate steps. We use
the "argv" as the source of truth over the "args", so we need to
change all parts of run-command.[ch] itself, as well as the trace2
logging at the same time.
The resulting Windows-specific code in start_command() is a bit nasty,
as we're now assigning to a strvec's "v" member, instead of to our own
"argv". There was a suggestion of some alternate approaches in reply
to an earlier version of this commit[3], but let's leave larger a
larger and needless refactoring of this code for now.
1. http://lore.kernel.org/git/YT6BnnXeAWn8BycF@coredump.intra.peff.net
2. https://lore.kernel.org/git/20211120194048.12125-1-ematsumiya@suse.de/
3. https://lore.kernel.org/git/patch-5.5-ea1011f7473-20211122T153605Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2021-11-25 22:52:22 +00:00
|
|
|
int ret = wait_or_whine(cmd->pid, cmd->args.v[0], 1);
|
2022-06-07 18:21:57 +00:00
|
|
|
if (ret != -1)
|
|
|
|
trace2_child_exit(cmd, ret);
|
2019-02-22 22:25:01 +00:00
|
|
|
return ret;
|
2015-09-04 09:35:57 +00:00
|
|
|
}
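A sketch of the pattern finish_command_in_signal() enables, loosely modeled on git's pager handling: the handler restricts itself to async-signal-safe work (no malloc/free, no stdio). The names pager_process and wait_for_pager_signal() are illustrative here, not definitions taken from this file.
static struct child_process pager_process = CHILD_PROCESS_INIT;

static void wait_for_pager_signal(int signo)
{
	/* only close and wait; never allocate or print from here */
	close(1);
	close(2);
	finish_command_in_signal(&pager_process);
	sigchain_pop(signo);
	raise(signo);
}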
|
|
|
|
|
|
|
|
|
2007-03-10 08:28:05 +00:00
|
|
|
int run_command(struct child_process *cmd)
|
|
|
|
{
|
2015-03-23 03:54:05 +00:00
|
|
|
int code;
|
|
|
|
|
|
|
|
if (cmd->out < 0 || cmd->err < 0)
|
2018-05-02 09:38:39 +00:00
|
|
|
BUG("run_command with a pipe can cause deadlock");
|
2015-03-23 03:54:05 +00:00
|
|
|
|
|
|
|
code = start_command(cmd);
|
2007-03-10 08:28:05 +00:00
|
|
|
if (code)
|
|
|
|
return code;
|
|
|
|
return finish_command(cmd);
|
|
|
|
}
|
|
|
|
|
2010-03-09 20:00:36 +00:00
|
|
|
#ifndef NO_PTHREADS
|
Dying in an async procedure should only exit the thread, not the process.
Async procedures are intended as helpers that perform a very restricted
task, and the caller usually has to manage them in a larger context.
Conceptually, the async procedure is not concerned with the "bigger
picture" in whose context it is run. When it dies, it is not supposed
to destroy this "bigger picture", but rather only its own limited view
of the world. On POSIX, the async procedure is run in its own process,
and exiting this process naturally had only these limited effects.
On Windows (or when ASYNC_AS_THREAD is set), calling die() exited the
whole process, destroying the caller (the "big picture") as well.
This fixes it to exit only the thread.
Without ASYNC_AS_THREAD, one particular effect of exiting the async
procedure process is that it automatically closes file descriptors, most
notably the writable end of the pipe that the async procedure writes to.
The async API already requires that the async procedure closes the pipe
ends when it exits normally. But for calls to die() no requirements are
imposed. In the non-threaded case the pipe ends are closed implicitly
by the exiting process, but in the threaded case, the die routine must
take care of closing them.
Now t5530-upload-pack-error.sh passes on Windows.
Signed-off-by: Johannes Sixt <j6t@kdbg.org>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2010-03-06 15:40:43 +00:00
|
|
|
static pthread_t main_thread;
|
|
|
|
static int main_thread_set;
|
|
|
|
static pthread_key_t async_key;
|
run-command: use thread-aware die_is_recursing routine
If we die from an async thread, we do not actually exit the
program, but just kill the thread. This confuses the static
counter in usage.c's default die_is_recursing function; it
updates the counter once for the thread death, and then when
the main program calls die() itself, it erroneously thinks
we are recursing. The end result is that we print "recursion
detected in die handler" instead of the real error in such a
case (the easiest way to trigger this is having a remote
connection hang up while running a sideband demultiplexer).
This patch solves it by using a per-thread counter when the
async_die function is installed; we detect recursion in each
thread (including the main one), but they do not step on
each other's toes.
Other threaded code does not need to worry about this, as
they do not install specialized die handlers; they just let
a die() from a sub-thread take down the whole program.
Since we are overriding the default recursion-check
function, there is an interesting corner case that is not a
problem, but bears some explanation. Imagine the main thread
calls die(), and then in the die_routine starts an async
call. We will switch to using thread-local storage, which
starts at 0, for the main thread's counter, even though
the original counter was actually at 1. That's OK, though,
for two reasons:
1. It would miss only the first level of recursion, and
would still find recursive failures inside the async
helper.
2. We do not currently and are not likely to start doing
anything as heavyweight as starting an async routine
from within a die routine or helper function.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2013-04-16 19:50:07 +00:00
|
|
|
static pthread_key_t async_die_counter;
|
2010-03-06 15:40:43 +00:00
|
|
|
|
2010-03-06 15:40:42 +00:00
|
|
|
static void *run_thread(void *data)
|
2007-12-08 21:19:14 +00:00
|
|
|
{
|
|
|
|
struct async *async = data;
|
2010-03-09 20:00:36 +00:00
|
|
|
intptr_t ret;
|
2010-03-06 15:40:43 +00:00
|
|
|
|
run-command: teach async threads to ignore SIGPIPE
Async processes can be implemented as separate forked
processes, or as threads (depending on the NO_PTHREADS
setting). In the latter case, if an async thread gets
SIGPIPE, it takes down the whole process. This is obviously
bad if the main process was not otherwise going to die, but
even if we were going to die, it means the main process does
not have a chance to report a useful error message.
There's also the small matter that forked async processes
will not take the main process down on a signal, meaning git
will behave differently depending on the NO_PTHREADS
setting.
This patch fixes it by adding a new flag to "struct async"
to block SIGPIPE just in the async thread. In theory, this
should always be on (which makes async threads behave more
like async processes), but we would first want to make sure
that each async process we spawn is careful about checking
return codes from write() and would not spew endlessly into
a dead pipe. So let's start with it as optional, and we can
enable it for specific sites in future patches.
The natural name for this option would be "ignore_sigpipe",
since that's what it does for the threaded case. But since
that name might imply that we are ignoring it in all cases
(including the separate-process one), let's call it
"isolate_sigpipe". What we are really asking for is
isolation. I.e., not to have our main process taken down by
signals spawned by the async process. How that is
implemented is up to the run-command code.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-04-19 22:49:41 +00:00
|
|
|
if (async->isolate_sigpipe) {
|
|
|
|
sigset_t mask;
|
|
|
|
sigemptyset(&mask);
|
|
|
|
sigaddset(&mask, SIGPIPE);
|
2022-12-02 17:02:58 +00:00
|
|
|
if (pthread_sigmask(SIG_BLOCK, &mask, NULL)) {
|
2016-04-19 22:49:41 +00:00
|
|
|
ret = error("unable to block SIGPIPE in async thread");
|
|
|
|
return (void *)ret;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-03-06 15:40:43 +00:00
|
|
|
pthread_setspecific(async_key, async);
|
2010-03-09 20:00:36 +00:00
|
|
|
ret = async->proc(async->proc_in, async->proc_out, async->data);
|
2010-03-06 15:40:42 +00:00
|
|
|
return (void *)ret;
|
2007-12-08 21:19:14 +00:00
|
|
|
}
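As the "Dying in an async procedure" message above notes, the async API requires the procedure to close its pipe ends itself when it returns normally. A minimal sketch of a conforming callback (copy_proc() is a hypothetical name; the real work is elided):
static int copy_proc(int in, int out, void *data)
{
	int ret = 0;

	/* ... the actual work on "in"/"out"/"data" would go here ... */

	/* API contract: close our pipe ends before returning normally */
	if (in >= 0)
		close(in);
	if (out >= 0)
		close(out);
	return ret;
}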
|
2010-03-06 15:40:43 +00:00
|
|
|
|
|
|
|
static NORETURN void die_async(const char *err, va_list params)
|
|
|
|
{
|
2021-12-07 18:26:30 +00:00
|
|
|
report_fn die_message_fn = get_die_message_routine();
|
|
|
|
|
|
|
|
die_message_fn(err, params);
|
2010-03-06 15:40:43 +00:00
|
|
|
|
2015-09-01 20:22:43 +00:00
|
|
|
if (in_async()) {
|
2010-03-06 15:40:43 +00:00
|
|
|
struct async *async = pthread_getspecific(async_key);
|
|
|
|
if (async->proc_in >= 0)
|
|
|
|
close(async->proc_in);
|
|
|
|
if (async->proc_out >= 0)
|
|
|
|
close(async->proc_out);
|
|
|
|
pthread_exit((void *)128);
|
|
|
|
}
|
|
|
|
|
|
|
|
exit(128);
|
2007-12-08 21:19:14 +00:00
|
|
|
}
|
2013-04-16 19:50:07 +00:00
|
|
|
|
|
|
|
static int async_die_is_recursing(void)
|
|
|
|
{
|
|
|
|
void *ret = pthread_getspecific(async_die_counter);
|
2021-11-04 04:01:03 +00:00
|
|
|
pthread_setspecific(async_die_counter, &async_die_counter); /* set to any non-NULL valid pointer */
|
2013-04-16 19:50:07 +00:00
|
|
|
return ret != NULL;
|
|
|
|
}
|
|
|
|
|
2015-09-01 20:22:43 +00:00
|
|
|
int in_async(void)
|
|
|
|
{
|
|
|
|
if (!main_thread_set)
|
|
|
|
return 0; /* no asyncs started yet */
|
|
|
|
return !pthread_equal(main_thread, pthread_self());
|
|
|
|
}
|
|
|
|
|
2016-10-16 23:20:27 +00:00
|
|
|
static void NORETURN async_exit(int code)
|
write_or_die: handle EPIPE in async threads
When write_or_die() sees EPIPE, it treats it specially by
converting it into a SIGPIPE death. We obviously cannot
ignore it, as the write has failed and the caller expects us
to die. But likewise, we cannot just call die(), because
printing any message at all would be a nuisance during
normal operations.
However, this is a problem if write_or_die() is called from
a thread. Our raised signal ends up killing the whole
process, when logically we just need to kill the thread
(after all, if we are ignoring SIGPIPE, there is good reason
to think that the main thread is expecting to handle it).
Inside an async thread, the die() code already does the
right thing, because we use our custom die_async() routine,
which calls pthread_exit(). So ideally we would piggy-back
on that, and simply call:
die_quietly_with_code(141);
or similar. But refactoring the die code to do this is
surprisingly non-trivial. The die_routines themselves handle
both printing and the decision of the exit code. Every one
of them would have to be modified to take new parameters for
the code, and to tell us to be quiet.
Instead, we can just teach write_or_die() to check for the
async case and handle it specially. We do have to build an
interface to abstract the async exit, but it's simple and
self-contained. If we had many call-sites that wanted to do
this die_quietly_with_code(), this approach wouldn't scale
as well, but we don't. This is the only place where we do this
weird exit trick.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2016-02-24 07:40:16 +00:00
|
|
|
{
|
|
|
|
pthread_exit((void *)(intptr_t)code);
|
|
|
|
}
|
|
|
|
|
2014-10-18 12:31:15 +00:00
|
|
|
#else
|
|
|
|
|
|
|
|
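/*
 * NO_PTHREADS fallback: async procedures run as fork()ed child
 * processes. We keep our own list of atexit handlers so that such a
 * child can drop the handlers registered by its parent (see the
 * git_atexit_clear() call in start_async() below) instead of running
 * them a second time when it exits.
 */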
static struct {
|
|
|
|
void (**handlers)(void);
|
|
|
|
size_t nr;
|
|
|
|
size_t alloc;
|
|
|
|
} git_atexit_hdlrs;
|
|
|
|
|
|
|
|
static int git_atexit_installed;
|
|
|
|
|
2014-11-10 21:17:00 +00:00
|
|
|
static void git_atexit_dispatch(void)
|
2014-10-18 12:31:15 +00:00
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
|
|
|
|
for (i=git_atexit_hdlrs.nr ; i ; i--)
|
|
|
|
git_atexit_hdlrs.handlers[i-1]();
|
|
|
|
}
|
|
|
|
|
2014-11-10 21:17:00 +00:00
|
|
|
static void git_atexit_clear(void)
|
2014-10-18 12:31:15 +00:00
|
|
|
{
|
|
|
|
free(git_atexit_hdlrs.handlers);
|
|
|
|
memset(&git_atexit_hdlrs, 0, sizeof(git_atexit_hdlrs));
|
|
|
|
git_atexit_installed = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#undef atexit
|
|
|
|
int git_atexit(void (*handler)(void))
|
|
|
|
{
|
|
|
|
ALLOC_GROW(git_atexit_hdlrs.handlers, git_atexit_hdlrs.nr + 1, git_atexit_hdlrs.alloc);
|
|
|
|
git_atexit_hdlrs.handlers[git_atexit_hdlrs.nr++] = handler;
|
|
|
|
if (!git_atexit_installed) {
|
|
|
|
if (atexit(&git_atexit_dispatch))
|
|
|
|
return -1;
|
|
|
|
git_atexit_installed = 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
#define atexit git_atexit
|
|
|
|
|
2015-09-01 20:22:43 +00:00
|
|
|
static int process_is_async;
|
|
|
|
int in_async(void)
|
|
|
|
{
|
|
|
|
return process_is_async;
|
|
|
|
}
|
|
|
|
|
2016-10-16 23:20:27 +00:00
|
|
|
static void NORETURN async_exit(int code)
|
2016-02-24 07:40:16 +00:00
|
|
|
{
|
|
|
|
exit(code);
|
|
|
|
}
|
|
|
|
|
2007-12-08 21:19:14 +00:00
|
|
|
#endif
|
|
|
|
|
2016-10-16 23:20:27 +00:00
|
|
|
void check_pipe(int err)
|
|
|
|
{
|
|
|
|
if (err == EPIPE) {
|
|
|
|
if (in_async())
|
|
|
|
async_exit(141);
|
|
|
|
|
|
|
|
signal(SIGPIPE, SIG_DFL);
|
|
|
|
raise(SIGPIPE);
|
|
|
|
/* Should never happen, but just in case... */
|
|
|
|
exit(141);
|
|
|
|
}
|
|
|
|
}
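For context, a hedged sketch of the kind of caller check_pipe() serves, modeled on git's write_or_die() (which lives outside this file); the _sketch suffix marks it as illustrative:
static void write_or_die_sketch(int fd, const void *buf, size_t count)
{
	if (write_in_full(fd, buf, count) < 0) {
		check_pipe(errno);	/* on EPIPE: quiet thread/process exit */
		die_errno("write error");
	}
}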
|
|
|
|
|
2007-10-19 19:48:00 +00:00
|
|
|
int start_async(struct async *async)
|
|
|
|
{
|
2010-02-05 20:57:38 +00:00
|
|
|
int need_in, need_out;
|
|
|
|
int fdin[2], fdout[2];
|
|
|
|
int proc_in, proc_out;
|
2007-10-19 19:48:00 +00:00
|
|
|
|
2010-02-05 20:57:38 +00:00
|
|
|
need_in = async->in < 0;
|
|
|
|
if (need_in) {
|
|
|
|
if (pipe(fdin) < 0) {
|
|
|
|
if (async->out > 0)
|
|
|
|
close(async->out);
|
2016-05-08 09:47:53 +00:00
|
|
|
return error_errno("cannot create pipe");
|
2010-02-05 20:57:38 +00:00
|
|
|
}
|
|
|
|
async->in = fdin[1];
|
|
|
|
}
|
|
|
|
|
|
|
|
need_out = async->out < 0;
|
|
|
|
if (need_out) {
|
|
|
|
if (pipe(fdout) < 0) {
|
|
|
|
if (need_in)
|
|
|
|
close_pair(fdin);
|
|
|
|
else if (async->in)
|
|
|
|
close(async->in);
|
2016-05-08 09:47:53 +00:00
|
|
|
return error_errno("cannot create pipe");
|
2010-02-05 20:57:38 +00:00
|
|
|
}
|
|
|
|
async->out = fdout[0];
|
|
|
|
}
|
|
|
|
|
|
|
|
if (need_in)
|
|
|
|
proc_in = fdin[0];
|
|
|
|
else if (async->in)
|
|
|
|
proc_in = async->in;
|
|
|
|
else
|
|
|
|
proc_in = -1;
|
|
|
|
|
|
|
|
if (need_out)
|
|
|
|
proc_out = fdout[1];
|
|
|
|
else if (async->out)
|
|
|
|
proc_out = async->out;
|
|
|
|
else
|
|
|
|
proc_out = -1;
|
2007-10-19 19:48:00 +00:00
|
|
|
|
2010-03-09 20:00:36 +00:00
|
|
|
#ifdef NO_PTHREADS
|
2008-08-04 00:30:03 +00:00
|
|
|
/* Flush stdio before fork() to avoid cloning buffers */
|
|
|
|
fflush(NULL);
|
|
|
|
|
2007-10-19 19:48:00 +00:00
|
|
|
async->pid = fork();
|
|
|
|
if (async->pid < 0) {
|
2016-05-08 09:47:53 +00:00
|
|
|
error_errno("fork (async) failed");
|
2010-02-05 20:57:38 +00:00
|
|
|
goto error;
|
2007-10-19 19:48:00 +00:00
|
|
|
}
|
|
|
|
if (!async->pid) {
|
2010-02-05 20:57:38 +00:00
|
|
|
if (need_in)
|
|
|
|
close(fdin[1]);
|
|
|
|
if (need_out)
|
|
|
|
close(fdout[0]);
|
2014-10-18 12:31:15 +00:00
|
|
|
git_atexit_clear();
|
2015-09-01 20:22:43 +00:00
|
|
|
process_is_async = 1;
|
2010-02-05 20:57:38 +00:00
|
|
|
exit(!!async->proc(proc_in, proc_out, async->data));
|
2007-10-19 19:48:00 +00:00
|
|
|
}
|
2010-02-05 20:57:38 +00:00
|
|
|
|
2016-10-16 23:20:28 +00:00
|
|
|
mark_child_for_cleanup(async->pid, NULL);
|
2012-01-07 11:42:43 +00:00
|
|
|
|
2010-02-05 20:57:38 +00:00
|
|
|
if (need_in)
|
|
|
|
close(fdin[0]);
|
|
|
|
else if (async->in)
|
|
|
|
close(async->in);
|
|
|
|
|
|
|
|
if (need_out)
|
|
|
|
close(fdout[1]);
|
|
|
|
else if (async->out)
|
|
|
|
close(async->out);
|
2007-12-08 21:19:14 +00:00
|
|
|
#else
|
2010-03-06 15:40:43 +00:00
|
|
|
if (!main_thread_set) {
|
|
|
|
/*
|
|
|
|
* We assume that the first time that start_async is called
|
|
|
|
* it is from the main thread.
|
|
|
|
*/
|
|
|
|
main_thread_set = 1;
|
|
|
|
main_thread = pthread_self();
|
|
|
|
pthread_key_create(&async_key, NULL);
|
2013-04-16 19:50:07 +00:00
|
|
|
pthread_key_create(&async_die_counter, NULL);
|
2010-03-06 15:40:43 +00:00
|
|
|
set_die_routine(die_async);
|
2013-04-16 19:50:07 +00:00
|
|
|
set_die_is_recursing_routine(async_die_is_recursing);
|
2010-03-06 15:40:43 +00:00
|
|
|
}
|
|
|
|
|
2010-03-06 15:40:42 +00:00
|
|
|
if (proc_in >= 0)
|
|
|
|
set_cloexec(proc_in);
|
|
|
|
if (proc_out >= 0)
|
|
|
|
set_cloexec(proc_out);
|
2010-02-05 20:57:38 +00:00
|
|
|
async->proc_in = proc_in;
|
|
|
|
async->proc_out = proc_out;
|
2010-03-06 15:40:42 +00:00
|
|
|
{
|
|
|
|
int err = pthread_create(&async->tid, NULL, run_thread, async);
|
|
|
|
if (err) {
|
2018-11-03 08:48:50 +00:00
|
|
|
error(_("cannot create async thread: %s"), strerror(err));
|
2010-03-06 15:40:42 +00:00
|
|
|
goto error;
|
|
|
|
}
|
2007-12-08 21:19:14 +00:00
|
|
|
}
|
|
|
|
#endif
|
2007-10-19 19:48:00 +00:00
|
|
|
return 0;
|
2010-02-05 20:57:38 +00:00
|
|
|
|
|
|
|
error:
|
|
|
|
if (need_in)
|
|
|
|
close_pair(fdin);
|
|
|
|
else if (async->in)
|
|
|
|
close(async->in);
|
|
|
|
|
|
|
|
if (need_out)
|
|
|
|
close_pair(fdout);
|
|
|
|
else if (async->out)
|
|
|
|
close(async->out);
|
|
|
|
return -1;
|
2007-10-19 19:48:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int finish_async(struct async *async)
|
|
|
|
{
|
2010-03-09 20:00:36 +00:00
|
|
|
#ifdef NO_PTHREADS
|
2021-02-02 21:09:52 +00:00
|
|
|
int ret = wait_or_whine(async->pid, "child process", 0);
|
|
|
|
|
|
|
|
invalidate_lstat_cache();
|
|
|
|
|
|
|
|
return ret;
|
2007-12-08 21:19:14 +00:00
|
|
|
#else
|
2010-03-06 15:40:42 +00:00
|
|
|
void *ret = (void *)(intptr_t)(-1);
|
|
|
|
|
|
|
|
if (pthread_join(async->tid, &ret))
|
|
|
|
error("pthread_join failed");
|
2021-02-02 21:09:52 +00:00
|
|
|
invalidate_lstat_cache();
|
2010-03-06 15:40:42 +00:00
|
|
|
return (int)(intptr_t)ret;
|
2021-02-02 21:09:52 +00:00
|
|
|
|
2007-12-08 21:19:14 +00:00
|
|
|
#endif
|
2007-10-19 19:48:00 +00:00
|
|
|
}
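Putting the pieces together, a hedged sketch of a start_async()/finish_async() round trip that opts into the SIGPIPE isolation described in the "teach async threads to ignore SIGPIPE" message earlier. The callback produce_data() and helper run_producer() are hypothetical names:
static int produce_data(int in, int out, void *data)
{
	int ret = write_in_full(out, "hello\n", 6) < 0;

	close(out);	/* async contract: close our end when done */
	return ret;
}

static int run_producer(struct strbuf *result)
{
	struct async as;

	memset(&as, 0, sizeof(as));
	as.proc = produce_data;
	/* as.in stays 0: the proc gets no input descriptor */
	as.out = -1;		/* ask start_async() for a pipe we read from */
	as.isolate_sigpipe = 1;	/* its SIGPIPE must not take us down */

	if (start_async(&as))
		return -1;
	if (strbuf_read(result, as.out, 0) < 0)
		warning_errno("read from async proc failed");
	close(as.out);
	return finish_async(&as);
}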
|
2009-01-16 19:09:59 +00:00
|
|
|
|
2018-11-03 08:48:39 +00:00
|
|
|
int async_with_fork(void)
|
|
|
|
{
|
|
|
|
#ifdef NO_PTHREADS
|
|
|
|
return 1;
|
|
|
|
#else
|
|
|
|
return 0;
|
|
|
|
#endif
|
|
|
|
}
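A hedged micro-example of the decision async_with_fork() lets callers make: when the async procedure runs in a fork()ed process rather than a thread, in-memory state it mutates is not visible to the caller, so results must come back over the pipe. The helper name below is illustrative:
static int async_can_share_memory(void)
{
	/*
	 * With fork()-based asyncs (NO_PTHREADS), anything the proc
	 * writes to our data structures happens in the child only.
	 */
	return !async_with_fork();
}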
|
|
|
|
|
2016-06-17 23:38:47 +00:00
|
|
|
struct io_pump {
|
|
|
|
/* initialized by caller */
|
|
|
|
int fd;
|
|
|
|
int type; /* POLLOUT or POLLIN */
|
|
|
|
union {
|
|
|
|
struct {
|
|
|
|
const char *buf;
|
|
|
|
size_t len;
|
|
|
|
} out;
|
|
|
|
struct {
|
|
|
|
struct strbuf *buf;
|
|
|
|
size_t hint;
|
|
|
|
} in;
|
|
|
|
} u;
|
|
|
|
|
|
|
|
/* returned by pump_io */
|
|
|
|
int error; /* 0 for success, otherwise errno */
|
|
|
|
|
|
|
|
/* internal use */
|
|
|
|
struct pollfd *pfd;
|
|
|
|
};
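A minimal sketch of how the io_pump slots above are meant to be filled, mirroring what pipe_command() does further down; pump_io() is defined below, and the parameter names here are illustrative:
static int pump_two_fds(int write_fd, const char *input, size_t input_len,
			int read_fd, struct strbuf *output)
{
	struct io_pump io[2];
	int nr = 0;

	io[nr].fd = write_fd;		/* we write "input" to this fd */
	io[nr].type = POLLOUT;
	io[nr].u.out.buf = input;
	io[nr].u.out.len = input_len;
	nr++;

	io[nr].fd = read_fd;		/* we collect this fd into "output" */
	io[nr].type = POLLIN;
	io[nr].u.in.buf = output;
	io[nr].u.in.hint = 0;
	nr++;

	return pump_io(io, nr);		/* 0 on success, -1 with errno set */
}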
|
|
|
|
|
|
|
|
static int pump_io_round(struct io_pump *slots, int nr, struct pollfd *pfd)
|
|
|
|
{
|
|
|
|
int pollsize = 0;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < nr; i++) {
|
|
|
|
struct io_pump *io = &slots[i];
|
|
|
|
if (io->fd < 0)
|
|
|
|
continue;
|
|
|
|
pfd[pollsize].fd = io->fd;
|
|
|
|
pfd[pollsize].events = io->type;
|
|
|
|
io->pfd = &pfd[pollsize++];
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!pollsize)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (poll(pfd, pollsize, -1) < 0) {
|
|
|
|
if (errno == EINTR)
|
|
|
|
return 1;
|
|
|
|
die_errno("poll failed");
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < nr; i++) {
|
|
|
|
struct io_pump *io = &slots[i];
|
|
|
|
|
|
|
|
if (io->fd < 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (!(io->pfd->revents & (POLLOUT|POLLIN|POLLHUP|POLLERR|POLLNVAL)))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (io->type == POLLOUT) {
|
pipe_command(): avoid xwrite() for writing to pipe
If xwrite() sees an EAGAIN response, it will loop forever until the
write succeeds (or encounters a real error). This is due to ef1cf0167a
(xwrite: poll on non-blocking FDs, 2016-06-26), with the idea that we
won't be surprised by a descriptor unexpectedly set as non-blocking.
But that will make things awkward when we do want a non-blocking
descriptor, and a future patch will switch pipe_command() to using one.
In that case, looping on EAGAIN is bad, because the process on the other
end of the pipe may be waiting on us before doing another read() on the
pipe, which would mean we deadlock.
In practice we're not supposed to ever see EAGAIN here, since poll()
will have just told us the descriptor is ready for writing. But our
Windows emulation of poll() will always return "ready" for writing to a
pipe descriptor! This is due to 94f4d01932 (mingw: workaround for hangs
when sending STDIN, 2020-02-17).
Our best bet in that case is to keep handling other descriptors, as any
read() we do may allow the child command to make forward progress (i.e.,
its write() finishes, and then it read()s from its stdin, freeing up
space in the pipe buffer). This means we might busy-loop between poll()
and write() on Windows if the child command is slow to read our input,
but it's much better than the alternative of deadlocking.
In practice, this busy-looping should be rare:
- for small inputs, we'll just write the whole thing in a single
write() anyway, non-blocking or not
- for larger inputs where the child reads input and then processes it
before writing (e.g., gpg verifying a signature), we may make a few
extra write() calls that get EAGAIN during the initial write, but
once it has taken in the whole input, we'll correctly block waiting
to read back the data.
- for larger inputs where the child process is streaming output back
(like a diff filter), we'll likewise see some extra EAGAINs, but
most of them will be followed immediately by a read(), which will
let the child command make forward progress.
Of course it won't happen at all for now, since we don't yet use a
non-blocking pipe. This is just preparation for when we do.
Helped-by: René Scharfe <l.s.r@web.de>
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-08-17 06:08:06 +00:00
|
|
|
ssize_t len;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Don't use xwrite() here. It loops forever on EAGAIN,
|
|
|
|
* and we're in our own poll() loop here.
|
|
|
|
*
|
|
|
|
* Note that we lose xwrite()'s handling of MAX_IO_SIZE
|
|
|
|
* and EINTR, so we have to implement those ourselves.
|
|
|
|
*/
|
|
|
|
len = write(io->fd, io->u.out.buf,
|
|
|
|
io->u.out.len <= MAX_IO_SIZE ?
|
|
|
|
io->u.out.len : MAX_IO_SIZE);
|
2016-06-17 23:38:47 +00:00
|
|
|
if (len < 0) {
|
2022-08-17 06:09:42 +00:00
|
|
|
if (errno != EINTR && errno != EAGAIN &&
|
|
|
|
errno != ENOSPC) {
|
2022-08-17 06:08:06 +00:00
|
|
|
io->error = errno;
|
|
|
|
close(io->fd);
|
|
|
|
io->fd = -1;
|
|
|
|
}
|
2016-06-17 23:38:47 +00:00
|
|
|
} else {
|
|
|
|
io->u.out.buf += len;
|
|
|
|
io->u.out.len -= len;
|
|
|
|
if (!io->u.out.len) {
|
|
|
|
close(io->fd);
|
|
|
|
io->fd = -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (io->type == POLLIN) {
|
|
|
|
ssize_t len = strbuf_read_once(io->u.in.buf,
|
|
|
|
io->fd, io->u.in.hint);
|
|
|
|
if (len < 0)
|
|
|
|
io->error = errno;
|
|
|
|
if (len <= 0) {
|
|
|
|
close(io->fd);
|
|
|
|
io->fd = -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int pump_io(struct io_pump *slots, int nr)
|
run-command: introduce capture_command helper
Something as simple as reading the stdout from a command
turns out to be rather hard to do right. Doing:
cmd.out = -1;
run_command(&cmd);
strbuf_read(&buf, cmd.out, 0);
can result in deadlock if the child process produces a large
amount of output. What happens is:
1. The parent spawns the child with its stdout connected
to a pipe, of which the parent is the sole reader.
2. The parent calls wait(), blocking until the child exits.
3. The child writes to stdout. If it writes more data than
the OS pipe buffer can hold, the write() call will
block.
This is a deadlock; the parent is waiting for the child to
exit, and the child is waiting for the parent to call
read().
So we might try instead:
start_command(&cmd);
strbuf_read(&buf, cmd.out, 0);
finish_command(&cmd);
But that is not quite right either. We are examining cmd.out
and running finish_command whether start_command succeeded
or not, which is wrong. Moreover, these snippets do not do
any error handling. If our read() fails, we must make sure
to still call finish_command (to reap the child process).
And both snippets failed to close the cmd.out descriptor,
which they must do (provided start_command succeeded).
Let's introduce a run-command helper that can make this a
bit simpler for callers to get right.
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2015-03-23 03:53:43 +00:00
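For comparison, a hedged sketch of what a caller has to get right by hand with the existing primitives (command setup elided; the error handling shown is illustrative, not exhaustive):

struct child_process cmd = CHILD_PROCESS_INIT;
struct strbuf buf = STRBUF_INIT;
int ret = 0;

/* ... fill in cmd.args with the command to run ... */
cmd.out = -1;
if (start_command(&cmd) < 0)
	return -1;	/* start_command() failed; nothing to reap */
if (strbuf_read(&buf, cmd.out, 0) < 0)
	ret = -1;	/* remember the failure, but keep cleaning up */
close(cmd.out);
if (finish_command(&cmd))
	ret = -1;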
|
|
|
{
|
2016-06-17 23:38:47 +00:00
|
|
|
struct pollfd *pfd;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < nr; i++)
|
|
|
|
slots[i].error = 0;
|
|
|
|
|
|
|
|
ALLOC_ARRAY(pfd, nr);
|
|
|
|
while (pump_io_round(slots, nr, pfd))
|
|
|
|
; /* nothing */
|
|
|
|
free(pfd);
|
|
|
|
|
|
|
|
/* There may be multiple errno values, so just pick the first. */
|
|
|
|
for (i = 0; i < nr; i++) {
|
|
|
|
if (slots[i].error) {
|
|
|
|
errno = slots[i].error;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int pipe_command(struct child_process *cmd,
|
|
|
|
const char *in, size_t in_len,
|
|
|
|
struct strbuf *out, size_t out_hint,
|
|
|
|
struct strbuf *err, size_t err_hint)
|
|
|
|
{
|
|
|
|
struct io_pump io[3];
|
|
|
|
int nr = 0;
|
|
|
|
|
|
|
|
if (in)
|
|
|
|
cmd->in = -1;
|
|
|
|
if (out)
|
|
|
|
cmd->out = -1;
|
|
|
|
if (err)
|
|
|
|
cmd->err = -1;
|
|
|
|
|
run-command: introduce capture_command helper
2015-03-23 03:53:43 +00:00
|
|
|
if (start_command(cmd) < 0)
|
|
|
|
return -1;
|
|
|
|
|
2016-06-17 23:38:47 +00:00
|
|
|
if (in) {
|
pipe_command(): mark stdin descriptor as non-blocking
Our pipe_command() helper lets you both write to and read from a child
process on its stdin/stdout. It's supposed to work without deadlocks
because we use poll() to check when descriptors are ready for reading or
writing. But there's a bug: if both the data to be written and the data
to be read back exceed the pipe buffer, we'll deadlock.
The issue is that the code assumes that if you have, say, a 2MB buffer
to write and poll() tells you that the pipe descriptor is ready for
writing, that calling:
write(cmd->in, buf, 2*1024*1024);
will do a partial write, filling the pipe buffer and then returning what
it did write. And that is what it would do on a socket, but not for a
pipe. When writing to a pipe, at least on Linux, it will block waiting
for the child process to read() more. And now we have a potential
deadlock, because the child may be writing back to us, waiting for us to
read() ourselves.
An easy way to trigger this is:
git -c add.interactive.useBuiltin=true \
-c interactive.diffFilter=cat \
checkout -p HEAD~200
The diff against HEAD~200 will be big, and the filter wants to write all
of it back to us (obviously this is a dummy filter, but in the real
world something like diff-highlight would similarly stream back a big
output).
If you set add.interactive.useBuiltin to false, the problem goes away,
because now we're not using pipe_command() anymore (instead, that part
happens in perl). But this isn't a bug in the interactive code at all.
It's the underlying pipe_command() code which is broken, and has been
all along.
We presumably didn't notice because most calls only do input _or_
output, not both. And the few that do both, like gpg calls, may have
large inputs or outputs, but never both at the same time (e.g., consider
signing, which has a large payload but a small signature comes back).
The obvious fix is to put the descriptor into non-blocking mode, and
indeed, that makes the problem go away. Callers shouldn't need to
care, because they never see the descriptor (they hand us a buffer to
feed into it).
The included test fails reliably on Linux without this patch. Curiously,
it doesn't fail in our Windows CI environment, but has been reported to
do so for individual developers. It should pass in any environment after
this patch (courtesy of the compat/ layers added in the last few
commits).
Signed-off-by: Jeff King <peff@peff.net>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-08-17 06:10:22 +00:00
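As a rough idea of what marking the descriptor non-blocking involves on a plain POSIX system, a hedged sketch using fcntl() follows; the enable_pipe_nonblock() call used below comes from the compat/ layer mentioned above (which also has to cover Windows), so this is only an illustration, not its implementation:

#include <fcntl.h>

static int make_pipe_nonblocking(int fd)
{
	int flags = fcntl(fd, F_GETFL);

	if (flags < 0)
		return -1;
	return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}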
|
|
|
if (enable_pipe_nonblock(cmd->in) < 0) {
|
|
|
|
error_errno("unable to make pipe non-blocking");
|
|
|
|
close(cmd->in);
|
|
|
|
if (out)
|
|
|
|
close(cmd->out);
|
|
|
|
if (err)
|
|
|
|
close(cmd->err);
|
|
|
|
return -1;
|
|
|
|
}
|
2016-06-17 23:38:47 +00:00
|
|
|
io[nr].fd = cmd->in;
|
|
|
|
io[nr].type = POLLOUT;
|
|
|
|
io[nr].u.out.buf = in;
|
|
|
|
io[nr].u.out.len = in_len;
|
|
|
|
nr++;
|
|
|
|
}
|
|
|
|
if (out) {
|
|
|
|
io[nr].fd = cmd->out;
|
|
|
|
io[nr].type = POLLIN;
|
|
|
|
io[nr].u.in.buf = out;
|
|
|
|
io[nr].u.in.hint = out_hint;
|
|
|
|
nr++;
|
|
|
|
}
|
|
|
|
if (err) {
|
|
|
|
io[nr].fd = cmd->err;
|
|
|
|
io[nr].type = POLLIN;
|
|
|
|
io[nr].u.in.buf = err;
|
|
|
|
io[nr].u.in.hint = err_hint;
|
|
|
|
nr++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (pump_io(io, nr) < 0) {
|
run-command: introduce capture_command helper
2015-03-23 03:53:43 +00:00
|
|
|
finish_command(cmd); /* throw away exit code */
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return finish_command(cmd);
|
|
|
|
}
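A hypothetical caller-side sketch of the function above, feeding a small payload through a shell filter and capturing its stdout; the command and variable names are made up for illustration and error handling is minimal:

struct child_process cmd = CHILD_PROCESS_INIT;
struct strbuf out = STRBUF_INIT;
const char *payload = "hello\n";

cmd.use_shell = 1;
strvec_push(&cmd.args, "tr a-z A-Z");
if (pipe_command(&cmd, payload, strlen(payload), &out, 0, NULL, 0) < 0)
	die("filter failed");
/* "out" should now hold the filtered payload, e.g. "HELLO\n" */
strbuf_release(&out);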
|
run-command: add an asynchronous parallel child processor
2015-12-16 00:04:10 +00:00
|
|
|
|
|
|
|
enum child_state {
|
|
|
|
GIT_CP_FREE,
|
|
|
|
GIT_CP_WORKING,
|
|
|
|
GIT_CP_WAIT_CLEANUP,
|
|
|
|
};
|
|
|
|
|
|
|
|
struct parallel_processes {
|
run-command API: make "n" parameter a "size_t"
Make the "n" variable added in c553c72eed6 (run-command: add an
asynchronous parallel child processor, 2015-12-15) a "size_t". As
we'll see in a subsequent commit we do pass "0" here, but never "jobs
< 0".
We could have made it an "unsigned int", but as we're having to change
this let's not leave another case in the codebase where a size_t and
"unsigned int" size differ on some platforms. In this case it's likely
to never matter, but it's easier to not need to worry about it.
After this and preceding changes:
make run-command.o DEVOPTS=extra-all CFLAGS=-Wno-unused-parameter
only one (new) -Wsigned-compare warning remains that is relevant to a
comparison involving our "n" or "{nr,max}_processes": using our
"n" (size_t) in the same expression as online_cpus() (int). A
subsequent commit will adjust & deal with online_cpus() and that
warning.
The only users of the "n" parameter are:
* builtin/fetch.c: defaults to 1, reads from the "fetch.parallel"
config. As seen in the code that parses the config added in
d54dea77dba (fetch: let --jobs=<n> parallelize --multiple, too,
2019-10-05), it will die if the git_config_int() return value is < 0.
It will however pass us n = 0, as we'll see in a subsequent commit.
* submodule.c: defaults to 1, reads from "submodule.fetchJobs"
config. Read via code originally added in a028a1930c6 (fetching
submodules: respect `submodule.fetchJobs` config option, 2016-02-29).
It now piggy-backs on the submodule.fetchJobs code and
validation added in f20e7c1ea24 (submodule: remove
submodule.fetchjobs from submodule-config parsing, 2017-08-02).
Like builtin/fetch.c it will die if the git_config_int() return
value is < 0, but like builtin/fetch.c it will pass us n = 0.
* builtin/submodule--helper.c: defaults to 1. Read via code
originally added in 2335b870fa7 (submodule update: expose parallelism
to the user, 2016-02-29).
Since f20e7c1ea24 (submodule: remove submodule.fetchjobs from
submodule-config parsing, 2017-08-02) it shares a config parser and
semantics with the submodule.c caller.
* hook.c: hardcoded to 1, see 96e7225b310 (hook: add 'run'
subcommand, 2021-12-22).
* t/helper/test-run-command.c: can be -1 after parsing the arguments,
but will then be overridden to online_cpus() before passing it to
this API. See be5d88e1128 (test-tool run-command: learn to run (parts
of) the testsuite, 2019-10-04).
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-10-12 21:02:23 +00:00
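A hedged sketch of the caller-side pattern described above: a config value parsed as an int, rejected when negative, and then handed to this API as an unsigned count, with 0 left to mean "pick a default elsewhere". The config key matches the fetch example, but the surrounding code (including the error message and the opts variable) is invented:

int jobs = 1;

if (!git_config_get_int("fetch.parallel", &jobs) && jobs < 0)
	die("invalid negative value for fetch.parallel"); /* illustrative message */
/* the API takes a size_t; callers may pass 0, but never a negative value */
opts.processes = jobs;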
|
|
|
size_t nr_processes;
|
run-command: add an asynchronous parallel child processor
2015-12-16 00:04:10 +00:00
|
|
|
|
|
|
|
struct {
|
|
|
|
enum child_state state;
|
|
|
|
struct child_process process;
|
|
|
|
struct strbuf err;
|
|
|
|
void *data;
|
|
|
|
} *children;
|
|
|
|
/*
|
|
|
|
* The struct pollfd is logically part of *children,
|
|
|
|
* but the system call expects it as its own array.
|
|
|
|
*/
|
|
|
|
struct pollfd *pfd;
|
|
|
|
|
|
|
|
unsigned shutdown : 1;
|
|
|
|
|
run-command API: make "n" parameter a "size_t"
2022-10-12 21:02:23 +00:00
|
|
|
size_t output_owner;
|
run-command: add an asynchronous parallel child processor
2015-12-16 00:04:10 +00:00
|
|
|
struct strbuf buffered_output; /* of finished children */
|
|
|
|
};
|
|
|
|
|
2022-10-12 21:02:34 +00:00
|
|
|
struct parallel_processes_for_signal {
|
|
|
|
const struct run_process_parallel_opts *opts;
|
|
|
|
const struct parallel_processes *pp;
|
|
|
|
};
|
|
|
|
|
|
|
|
static void kill_children(const struct parallel_processes *pp,
|
|
|
|
const struct run_process_parallel_opts *opts,
|
|
|
|
int signo)
|
run-command: add an asynchronous parallel child processor
2015-12-16 00:04:10 +00:00
|
|
|
{
|
2022-10-12 21:02:34 +00:00
|
|
|
for (size_t i = 0; i < opts->processes; i++)
|
run-command: add an asynchronous parallel child processor
2015-12-16 00:04:10 +00:00
|
|
|
if (pp->children[i].state == GIT_CP_WORKING)
|
|
|
|
kill(pp->children[i].process.pid, signo);
|
|
|
|
}
|
|
|
|
|
2022-10-12 21:02:34 +00:00
|
|
|
static void kill_children_signal(const struct parallel_processes_for_signal *pp_sig,
|
|
|
|
int signo)
|
|
|
|
{
|
|
|
|
kill_children(pp_sig->pp, pp_sig->opts, signo);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct parallel_processes_for_signal *pp_for_signal;
|
run-command: add an asynchronous parallel child processor
2015-12-16 00:04:10 +00:00
|
|
|
|
|
|
|
static void handle_children_on_signal(int signo)
|
|
|
|
{
|
2022-10-12 21:02:34 +00:00
|
|
|
kill_children_signal(pp_for_signal, signo);
|
run-command: add an asynchronous parallel child processor
2015-12-16 00:04:10 +00:00
|
|
|
sigchain_pop(signo);
|
|
|
|
raise(signo);
|
|
|
|
}
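Presumably this handler is armed around the lifetime of the children via the sigchain API included at the top of this file; a sketch of that registration pattern (the actual call site is outside this excerpt) might look like:

struct parallel_processes_for_signal pp_sig = {
	.opts = opts,
	.pp = &pp,
};

pp_for_signal = &pp_sig;
sigchain_push_common(handle_children_on_signal);
/* ... spawn children, pump their output, collect them ... */
sigchain_pop_common();
pp_for_signal = NULL;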
|
|
|
|
|
|
|
|
static void pp_init(struct parallel_processes *pp,
|
2022-10-12 21:02:34 +00:00
|
|
|
const struct run_process_parallel_opts *opts,
|
|
|
|
struct parallel_processes_for_signal *pp_sig)
|
run-command: add an asynchronous parallel child processor
2015-12-16 00:04:10 +00:00
|
|
|
{
|
2022-10-12 21:02:26 +00:00
|
|
|
const size_t n = opts->processes;
|
2022-10-12 21:02:25 +00:00
|
|
|
|
2022-10-12 21:02:24 +00:00
|
|
|
if (!n)
|
|
|
|
BUG("you must provide a non-zero number of processes!");
|
run-command: add an asynchronous parallel child processor
2015-12-16 00:04:10 +00:00
|
|
|
|
run-command API: make "n" parameter a "size_t"
2022-10-12 21:02:23 +00:00
|
|
|
trace_printf("run_processes_parallel: preparing to run up to %"PRIuMAX" tasks",
|
|
|
|
(uintmax_t)n);
|
run-command: add an asynchronous parallel child processor
2015-12-16 00:04:10 +00:00
|
|
|
|
2022-10-12 21:02:29 +00:00
|
|
|
if (!opts->get_next_task)
|
2018-05-02 09:38:39 +00:00
|
|
|
BUG("you need to specify a get_next_task function");
|
run-command: add an asynchronous parallel child processor
2015-12-16 00:04:10 +00:00
|
|
|
|
2021-03-13 16:17:22 +00:00
|
|
|
CALLOC_ARRAY(pp->children, n);
|
2022-10-12 21:02:30 +00:00
|
|
|
if (!opts->ungroup)
|
run-command: add an "ungroup" option to run_process_parallel()
Extend the parallel execution API added in c553c72eed6 (run-command:
add an asynchronous parallel child processor, 2015-12-15) to support a
mode where the stdout and stderr of the processes aren't captured and
output in a deterministic order; instead we leave it to the kernel
and stdio to sort it out.
This gives the API the same functionality as GNU parallel's --ungroup
option. As we'll see in a subsequent commit, the main reason to want
this is to support stdout and stderr being connected to the TTY in the
case of jobs=1, demonstrated here with GNU parallel:
$ parallel --ungroup 'test -t {} && echo TTY || echo NTTY' ::: 1 2
TTY
TTY
$ parallel 'test -t {} && echo TTY || echo NTTY' ::: 1 2
NTTY
NTTY
Another is, as GNU parallel's documentation notes, a potential for
optimization. As demonstrated in the next commit, our results with "git
hook run" will be similar, but generally speaking this shows that if
you want to run processes in parallel where the exact order isn't
important, this can be a lot faster:
$ hyperfine -r 3 -L o ,--ungroup 'parallel {o} seq ::: 10000000 >/dev/null '
Benchmark 1: parallel seq ::: 10000000 >/dev/null
Time (mean ± σ): 220.2 ms ± 9.3 ms [User: 124.9 ms, System: 96.1 ms]
Range (min … max): 212.3 ms … 230.5 ms 3 runs
Benchmark 2: parallel --ungroup seq ::: 10000000 >/dev/null
Time (mean ± σ): 154.7 ms ± 0.9 ms [User: 136.2 ms, System: 25.1 ms]
Range (min … max): 153.9 ms … 155.7 ms 3 runs
Summary
'parallel --ungroup seq ::: 10000000 >/dev/null ' ran
1.42 ± 0.06 times faster than 'parallel seq ::: 10000000 >/dev/null '
A large part of the juggling in the API is to make the API safer for
its maintenance and consumers alike.
For the maintenance of the API we e.g. avoid malloc()-ing the
"pp->pfd", ensuring that SANITIZE=address and other similar tools will
catch any unexpected misuse.
For API consumers we take pains to never pass the non-NULL "out"
buffer to an API user that provided the "ungroup" option. The
resulting code in t/helper/test-run-command.c isn't typical of such a
user, i.e. they'd typically use one mode or the other, and would know
whether they'd provided "ungroup" or not.
We could also avoid the strbuf_init() for "buffered_output" by having
"struct parallel_processes" use a static PARALLEL_PROCESSES_INIT
initializer, but let's leave that cleanup for later.
Using a global "run_processes_parallel_ungroup" variable to enable
this option is rather nasty, but is being done here to produce as
minimal a change as possible for a subsequent regression fix. This
change is extracted from a larger initial version[1] which ends up
with a better end-state for the API, but in doing so needed to modify
all existing callers of the API. Let's defer that for now, and
narrowly focus on what we need for fixing the regression in the
subsequent commit.
It's safe to do this with a global variable because:
A) hook.c is the only user of it that sets it to non-zero, and before
we get any other API users we'll refactor away this method of
passing in the option, i.e. re-roll [1].
B) Even if hook.c wasn't the only user we don't have callers of this
API that concurrently invoke this parallel process starting API
itself in parallel.
As noted above "A" && "B" are rather nasty, and we don't want to live
with those caveats long-term, but for now they should be an acceptable
compromise.
1. https://lore.kernel.org/git/cover-v2-0.8-00000000000-20220518T195858Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-06-07 08:48:19 +00:00
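Putting the pieces together, a hypothetical consumer of the opts-based entry point might look like the sketch below. The callback body and the task bookkeeping are invented; only fields that appear in the code here (processes, ungroup, get_next_task) plus a presumed data pointer and the presumed run_processes_parallel(&opts) entry point are used:

struct remote_tasks {
	const char **remote;
	size_t nr, next;
};

static int next_task(struct child_process *cp, struct strbuf *err,
		     void *pp_cb, void **pp_task_cb)
{
	struct remote_tasks *tasks = pp_cb;

	if (tasks->next >= tasks->nr)
		return 0;	/* no more work to hand out */
	cp->git_cmd = 1;
	strvec_pushl(&cp->args, "fetch", tasks->remote[tasks->next++], NULL);
	return 1;		/* "cp" now describes a child to start */
}

/* ... and at the call site, something like: */
struct run_process_parallel_opts opts = {
	.processes = 4,
	.ungroup = 0,	/* keep per-child stderr buffered and ordered */
	.get_next_task = next_task,
	.data = &tasks,
};
run_processes_parallel(&opts);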
|
|
|
CALLOC_ARRAY(pp->pfd, n);
|
run-command: add an asynchronous parallel child processor
2015-12-16 00:04:10 +00:00
|
|
|
|
run-command API: make "n" parameter a "size_t"
Make the "n" variable added in c553c72eed6 (run-command: add an
asynchronous parallel child processor, 2015-12-15) a "size_t". As
we'll see in a subsequent commit we do pass "0" here, but never "jobs
< 0".
We could have made it an "unsigned int", but as we're having to change
this let's not leave another case in the codebase where a size_t and
"unsigned int" size differ on some platforms. In this case it's likely
to never matter, but it's easier to not need to worry about it.
After this and preceding changes:
make run-command.o DEVOPTS=extra-all CFLAGS=-Wno-unused-parameter
Only has one (and new) -Wsigned-compare warning relevant to a
comparison about our "n" or "{nr,max}_processes": About using our
"n" (size_t) in the same expression as online_cpus() (int). A
subsequent commit will adjust & deal with online_cpus() and that
warning.
The only users of the "n" parameter are:
* builtin/fetch.c: defaults to 1, reads from the "fetch.parallel"
config. As seen in the code that parses the config added in
d54dea77dba (fetch: let --jobs=<n> parallelize --multiple, too,
2019-10-05) will die if the git_config_int() return value is < 0.
It will however pass us n = 0, as we'll see in a subsequent commit.
* submodule.c: defaults to 1, reads from "submodule.fetchJobs"
config. Read via code originally added in a028a1930c6 (fetching
submodules: respect `submodule.fetchJobs` config option, 2016-02-29).
It now piggy-backs on the the submodule.fetchJobs code and
validation added in f20e7c1ea24 (submodule: remove
submodule.fetchjobs from submodule-config parsing, 2017-08-02).
Like builtin/fetch.c it will die if the git_config_int() return
value is < 0, but like builtin/fetch.c it will pass us n = 0.
* builtin/submodule--helper.c: defaults to 1. Read via code
originally added in 2335b870fa7 (submodule update: expose parallelism
to the user, 2016-02-29).
Since f20e7c1ea24 (submodule: remove submodule.fetchjobs from
submodule-config parsing, 2017-08-02) it shares a config parser and
semantics with the submodule.c caller.
* hook.c: hardcoded to 1, see 96e7225b310 (hook: add 'run'
subcommand, 2021-12-22).
* t/helper/test-run-command.c: can be -1 after parsing the arguments,
but will then be overridden to online_cpus() before passing it to
this API. See be5d88e1128 (test-tool run-command: learn to run (parts
of) the testsuite, 2019-10-04).
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-10-12 21:02:23 +00:00
|
|
|
for (size_t i = 0; i < n; i++) {
|
run-command: add an asynchronous parallel child processor
2015-12-16 00:04:10 +00:00
|
|
|
strbuf_init(&pp->children[i].err, 0);
|
|
|
|
child_process_init(&pp->children[i].process);
|
run-command: add an "ungroup" option to run_process_parallel()
Extend the parallel execution API added in c553c72eed6 (run-command:
add an asynchronous parallel child processor, 2015-12-15) to support a
mode where the stdout and stderr of the processes aren't captured and
output in a deterministic order; instead we'll leave it to the kernel
and stdio to sort it out.
This gives the API the same functionality as GNU parallel's --ungroup
option. As we'll see in a subsequent commit, the main reason to want
this is to support stdout and stderr being connected to the TTY in the
case of jobs=1, demonstrated here with GNU parallel:
$ parallel --ungroup 'test -t {} && echo TTY || echo NTTY' ::: 1 2
TTY
TTY
$ parallel 'test -t {} && echo TTY || echo NTTY' ::: 1 2
NTTY
NTTY
Another, as GNU parallel's documentation notes, is the potential for
optimization. As demonstrated in the next commit, our results with "git
hook run" will be similar, but generally speaking this shows that if
you want to run processes in parallel where the exact order isn't
important, this can be a lot faster:
$ hyperfine -r 3 -L o ,--ungroup 'parallel {o} seq ::: 10000000 >/dev/null '
Benchmark 1: parallel seq ::: 10000000 >/dev/null
Time (mean ± σ): 220.2 ms ± 9.3 ms [User: 124.9 ms, System: 96.1 ms]
Range (min … max): 212.3 ms … 230.5 ms 3 runs
Benchmark 2: parallel --ungroup seq ::: 10000000 >/dev/null
Time (mean ± σ): 154.7 ms ± 0.9 ms [User: 136.2 ms, System: 25.1 ms]
Range (min … max): 153.9 ms … 155.7 ms 3 runs
Summary
'parallel --ungroup seq ::: 10000000 >/dev/null ' ran
1.42 ± 0.06 times faster than 'parallel seq ::: 10000000 >/dev/null '
A large part of the juggling in the API is to make the API safer for
its maintenance and consumers alike.
For the maintenance of the API we e.g. avoid malloc()-ing the
"pp->pfd", ensuring that SANITIZE=address and other similar tools will
catch any unexpected misuse.
For API consumers we take pains to never pass the non-NULL "out"
buffer to an API user that provided the "ungroup" option. The
resulting code in t/helper/test-run-command.c isn't typical of such a
user, i.e. they'd typically use one mode or the other, and would know
whether they'd provided "ungroup" or not.
We could also avoid the strbuf_init() for "buffered_output" by having
"struct parallel_processes" use a static PARALLEL_PROCESSES_INIT
initializer, but let's leave that cleanup for later.
Using a global "run_processes_parallel_ungroup" variable to enable
this option is rather nasty, but is being done here to produce as
small a change as possible for a subsequent regression fix. This
change is extracted from a larger initial version[1] which ends up
with a better end-state for the API, but in doing so needed to modify
all existing callers of the API. Let's defer that for now, and
narrowly focus on what we need for fixing the regression in the
subsequent commit.
It's safe to do this with a global variable because:
A) hook.c is the only user of it that sets it to non-zero, and before
we get any other API users we'll refactor away this method of
passing in the option, i.e. re-roll [1].
B) Even if hook.c wasn't the only user we don't have callers of this
API that concurrently invoke this parallel process starting API
itself in parallel.
As noted above "A" && "B" are rather nasty, and we don't want to live
with those caveats long-term, but for now they should be an acceptable
compromise.
1. https://lore.kernel.org/git/cover-v2-0.8-00000000000-20220518T195858Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-06-07 08:48:19 +00:00
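To make the "never pass a non-NULL out buffer under ungroup" point above concrete, here is a hedged sketch of a get_next_task callback, following the parameter order visible in the pp_start_one() call further down; every example_* identifier is assumed and not part of this file.

/* Hedged sketch; assumes run-command.h, strbuf.h and strvec.h are in scope. */
struct example_tasks {
	const char **cmds; /* hypothetical list of commands to hand out */
	size_t next, nr;
};

static int example_next_task(struct child_process *cp, struct strbuf *out,
			     void *pp_cb, void **pp_task_cb)
{
	struct example_tasks *tasks = pp_cb;
	const char *cmd;

	if (tasks->next >= tasks->nr)
		return 0; /* no more work: do not start another child */

	cmd = tasks->cmds[tasks->next++];
	strvec_push(&cp->args, cmd);
	*pp_task_cb = (void *)cmd; /* remembered for the per-task callbacks */
	if (out) /* NULL when the "ungroup" option is in effect */
		strbuf_addf(out, "starting %s\n", cmd);
	return 1; /* a child_process has been set up */
}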
|
|
|
if (pp->pfd) {
|
|
|
|
pp->pfd[i].events = POLLIN | POLLHUP;
|
|
|
|
pp->pfd[i].fd = -1;
|
|
|
|
}
|
2015-12-16 00:04:10 +00:00
|
|
|
}
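An aside on the pfd initialization just above: poll(2) skips entries whose fd is negative, which is why free slots are parked at -1 while active slots watch POLLIN | POLLHUP. A hedged, self-contained illustration, not code from this file:

#include <poll.h>

/* Hedged sketch: non-zero if any registered child stderr has an event. */
static int example_any_child_event(struct pollfd *pfd, size_t n)
{
	/* entries with .fd == -1 (free slots) are ignored by poll() */
	return poll(pfd, n, 100 /* ms */) > 0;
}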
|
|
|
|
|
2022-10-12 21:02:34 +00:00
|
|
|
pp_sig->pp = pp;
|
|
|
|
pp_sig->opts = opts;
|
|
|
|
pp_for_signal = pp_sig;
|
2015-12-16 00:04:10 +00:00
|
|
|
sigchain_push_common(handle_children_on_signal);
|
|
|
|
}
|
|
|
|
|
2022-10-12 21:02:33 +00:00
|
|
|
static void pp_cleanup(struct parallel_processes *pp,
|
|
|
|
const struct run_process_parallel_opts *opts)
|
2015-12-16 00:04:10 +00:00
|
|
|
{
|
|
|
|
trace_printf("run_processes_parallel: done");
|
2022-10-12 21:02:33 +00:00
|
|
|
for (size_t i = 0; i < opts->processes; i++) {
|
2015-12-16 00:04:10 +00:00
|
|
|
strbuf_release(&pp->children[i].err);
|
|
|
|
child_process_clear(&pp->children[i].process);
|
|
|
|
}
|
|
|
|
|
|
|
|
free(pp->children);
|
|
|
|
free(pp->pfd);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When get_next_task added messages to the buffer in its last
|
|
|
|
* iteration, the buffered output is non-empty.
|
|
|
|
*/
|
2016-03-01 02:07:15 +00:00
|
|
|
strbuf_write(&pp->buffered_output, stderr);
|
2015-12-16 00:04:10 +00:00
|
|
|
strbuf_release(&pp->buffered_output);
|
|
|
|
|
|
|
|
sigchain_pop_common();
|
|
|
|
}
|
|
|
|
|
|
|
|
/* returns
|
|
|
|
* 0 if a new task was started.
|
|
|
|
* 1 if no new job was started (get_next_task ran out of work, or a non-critical
|
|
|
|
* problem with starting a new command)
|
|
|
|
* <0 no new job was started; the user wishes to shut down early. Use the negative code
|
|
|
|
* to signal the children.
|
|
|
|
*/
|
2022-10-12 21:02:29 +00:00
|
|
|
static int pp_start_one(struct parallel_processes *pp,
|
|
|
|
const struct run_process_parallel_opts *opts)
|
2015-12-16 00:04:10 +00:00
|
|
|
{
|
2022-10-12 21:02:23 +00:00
|
|
|
size_t i;
|
|
|
|
int code;
|
2015-12-16 00:04:10 +00:00
|
|
|
|
2022-10-12 21:02:32 +00:00
|
|
|
for (i = 0; i < opts->processes; i++)
|
2015-12-16 00:04:10 +00:00
|
|
|
if (pp->children[i].state == GIT_CP_FREE)
|
|
|
|
break;
|
2022-10-12 21:02:32 +00:00
|
|
|
if (i == opts->processes)
|
2018-05-02 09:38:39 +00:00
|
|
|
BUG("bookkeeping is hard");
|
2015-12-16 00:04:10 +00:00
|
|
|
|
2023-02-08 19:21:12 +00:00
|
|
|
/*
|
|
|
|
* By default, do not inherit stdin from the parent process - otherwise,
|
|
|
|
* all children would share stdin! Users may overwrite this to provide
|
|
|
|
* something to the child's stdin by having their 'get_next_task'
|
|
|
|
* callback assign 0 to .no_stdin and an appropriate integer to .in.
|
|
|
|
*/
|
|
|
|
pp->children[i].process.no_stdin = 1;
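As the comment above describes, a callback can opt a child back into having stdin; a hedged sketch wrapped in a hypothetical helper (where the fd comes from is assumed):

/* Hedged sketch: give one child a real stdin despite the no_stdin default. */
static void example_wire_child_stdin(struct child_process *cp, int fd)
{
	cp->no_stdin = 0; /* undo the default applied above */
	cp->in = fd;      /* start_command() connects this fd to the child's stdin */
}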
|
|
|
|
|
2022-10-12 21:02:29 +00:00
|
|
|
code = opts->get_next_task(&pp->children[i].process,
|
2022-10-12 21:02:30 +00:00
|
|
|
opts->ungroup ? NULL : &pp->children[i].err,
|
2022-10-12 21:02:31 +00:00
|
|
|
opts->data,
|
2022-10-12 21:02:29 +00:00
|
|
|
&pp->children[i].data);
|
2015-12-16 00:04:10 +00:00
|
|
|
if (!code) {
|
2022-10-12 21:02:30 +00:00
|
|
|
if (!opts->ungroup) {
|
run-command: add an "ungroup" option to run_process_parallel()
Extend the parallel execution API added in c553c72eed6 (run-command:
add an asynchronous parallel child processor, 2015-12-15) to support a
mode where the stdout and stderr of the processes isn't captured and
output in a deterministic order, instead we'll leave it to the kernel
and stdio to sort it out.
This gives the API same functionality as GNU parallel's --ungroup
option. As we'll see in a subsequent commit the main reason to want
this is to support stdout and stderr being connected to the TTY in the
case of jobs=1, demonstrated here with GNU parallel:
$ parallel --ungroup 'test -t {} && echo TTY || echo NTTY' ::: 1 2
TTY
TTY
$ parallel 'test -t {} && echo TTY || echo NTTY' ::: 1 2
NTTY
NTTY
Another is as GNU parallel's documentation notes a potential for
optimization. As demonstrated in next commit our results with "git
hook run" will be similar, but generally speaking this shows that if
you want to run processes in parallel where the exact order isn't
important this can be a lot faster:
$ hyperfine -r 3 -L o ,--ungroup 'parallel {o} seq ::: 10000000 >/dev/null '
Benchmark 1: parallel seq ::: 10000000 >/dev/null
Time (mean ± σ): 220.2 ms ± 9.3 ms [User: 124.9 ms, System: 96.1 ms]
Range (min … max): 212.3 ms … 230.5 ms 3 runs
Benchmark 2: parallel --ungroup seq ::: 10000000 >/dev/null
Time (mean ± σ): 154.7 ms ± 0.9 ms [User: 136.2 ms, System: 25.1 ms]
Range (min … max): 153.9 ms … 155.7 ms 3 runs
Summary
'parallel --ungroup seq ::: 10000000 >/dev/null ' ran
1.42 ± 0.06 times faster than 'parallel seq ::: 10000000 >/dev/null '
A large part of the juggling in the API is to make the API safer for
its maintenance and consumers alike.
For the maintenance of the API we e.g. avoid malloc()-ing the
"pp->pfd", ensuring that SANITIZE=address and other similar tools will
catch any unexpected misuse.
For API consumers we take pains to never pass the non-NULL "out"
buffer to an API user that provided the "ungroup" option. The
resulting code in t/helper/test-run-command.c isn't typical of such a
user, i.e. they'd typically use one mode or the other, and would know
whether they'd provided "ungroup" or not.
We could also avoid the strbuf_init() for "buffered_output" by having
"struct parallel_processes" use a static PARALLEL_PROCESSES_INIT
initializer, but let's leave that cleanup for later.
Using a global "run_processes_parallel_ungroup" variable to enable
this option is rather nasty, but is being done here to produce as
minimal of a change as possible for a subsequent regression fix. This
change is extracted from a larger initial version[1] which ends up
with a better end-state for the API, but in doing so needed to modify
all existing callers of the API. Let's defer that for now, and
narrowly focus on what we need for fixing the regression in the
subsequent commit.
It's safe to do this with a global variable because:
A) hook.c is the only user of it that sets it to non-zero, and before
we'll get any other API users we'll refactor away this method of
passing in the option, i.e. re-roll [1].
B) Even if hook.c wasn't the only user we don't have callers of this
API that concurrently invoke this parallel process starting API
itself in parallel.
As noted above "A" && "B" are rather nasty, and we don't want to live
with those caveats long-term, but for now they should be an acceptable
compromise.
1. https://lore.kernel.org/git/cover-v2-0.8-00000000000-20220518T195858Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-06-07 08:48:19 +00:00
|
|
|
strbuf_addbuf(&pp->buffered_output, &pp->children[i].err);
|
|
|
|
strbuf_reset(&pp->children[i].err);
|
|
|
|
}
|
2015-12-16 00:04:10 +00:00
|
|
|
return 1;
|
|
|
|
}
|
2022-10-12 21:02:30 +00:00
|
|
|
if (!opts->ungroup) {
|
run-command: add an "ungroup" option to run_process_parallel()
Extend the parallel execution API added in c553c72eed6 (run-command:
add an asynchronous parallel child processor, 2015-12-15) to support a
mode where the stdout and stderr of the processes isn't captured and
output in a deterministic order, instead we'll leave it to the kernel
and stdio to sort it out.
This gives the API same functionality as GNU parallel's --ungroup
option. As we'll see in a subsequent commit the main reason to want
this is to support stdout and stderr being connected to the TTY in the
case of jobs=1, demonstrated here with GNU parallel:
$ parallel --ungroup 'test -t {} && echo TTY || echo NTTY' ::: 1 2
TTY
TTY
$ parallel 'test -t {} && echo TTY || echo NTTY' ::: 1 2
NTTY
NTTY
Another is as GNU parallel's documentation notes a potential for
optimization. As demonstrated in next commit our results with "git
hook run" will be similar, but generally speaking this shows that if
you want to run processes in parallel where the exact order isn't
important this can be a lot faster:
$ hyperfine -r 3 -L o ,--ungroup 'parallel {o} seq ::: 10000000 >/dev/null '
Benchmark 1: parallel seq ::: 10000000 >/dev/null
Time (mean ± σ): 220.2 ms ± 9.3 ms [User: 124.9 ms, System: 96.1 ms]
Range (min … max): 212.3 ms … 230.5 ms 3 runs
Benchmark 2: parallel --ungroup seq ::: 10000000 >/dev/null
Time (mean ± σ): 154.7 ms ± 0.9 ms [User: 136.2 ms, System: 25.1 ms]
Range (min … max): 153.9 ms … 155.7 ms 3 runs
Summary
'parallel --ungroup seq ::: 10000000 >/dev/null ' ran
1.42 ± 0.06 times faster than 'parallel seq ::: 10000000 >/dev/null '
A large part of the juggling in the API is to make the API safer for
its maintenance and consumers alike.
For the maintenance of the API we e.g. avoid malloc()-ing the
"pp->pfd", ensuring that SANITIZE=address and other similar tools will
catch any unexpected misuse.
For API consumers we take pains to never pass the non-NULL "out"
buffer to an API user that provided the "ungroup" option. The
resulting code in t/helper/test-run-command.c isn't typical of such a
user, i.e. they'd typically use one mode or the other, and would know
whether they'd provided "ungroup" or not.
We could also avoid the strbuf_init() for "buffered_output" by having
"struct parallel_processes" use a static PARALLEL_PROCESSES_INIT
initializer, but let's leave that cleanup for later.
Using a global "run_processes_parallel_ungroup" variable to enable
this option is rather nasty, but is being done here to produce as
minimal of a change as possible for a subsequent regression fix. This
change is extracted from a larger initial version[1] which ends up
with a better end-state for the API, but in doing so needed to modify
all existing callers of the API. Let's defer that for now, and
narrowly focus on what we need for fixing the regression in the
subsequent commit.
It's safe to do this with a global variable because:
A) hook.c is the only user of it that sets it to non-zero, and before
we'll get any other API users we'll refactor away this method of
passing in the option, i.e. re-roll [1].
B) Even if hook.c wasn't the only user we don't have callers of this
API that concurrently invoke this parallel process starting API
itself in parallel.
As noted above "A" && "B" are rather nasty, and we don't want to live
with those caveats long-term, but for now they should be an acceptable
compromise.
1. https://lore.kernel.org/git/cover-v2-0.8-00000000000-20220518T195858Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-06-07 08:48:19 +00:00
|
|
|
pp->children[i].process.err = -1;
|
|
|
|
pp->children[i].process.stdout_to_stderr = 1;
|
|
|
|
}
|
2015-12-16 00:04:10 +00:00
|
|
|
|
|
|
|
if (start_command(&pp->children[i].process)) {
|
2022-10-12 21:02:29 +00:00
|
|
|
if (opts->start_failure)
|
2022-10-12 21:02:30 +00:00
|
|
|
code = opts->start_failure(opts->ungroup ? NULL :
|
2022-10-12 21:02:29 +00:00
|
|
|
&pp->children[i].err,
|
2022-10-12 21:02:31 +00:00
|
|
|
opts->data,
|
2022-10-12 21:02:29 +00:00
|
|
|
pp->children[i].data);
|
|
|
|
else
|
|
|
|
code = 0;
|
|
|
|
|
2022-10-12 21:02:30 +00:00
|
|
|
if (!opts->ungroup) {
|
run-command: add an "ungroup" option to run_process_parallel()
Extend the parallel execution API added in c553c72eed6 (run-command:
add an asynchronous parallel child processor, 2015-12-15) to support a
mode where the stdout and stderr of the processes isn't captured and
output in a deterministic order, instead we'll leave it to the kernel
and stdio to sort it out.
This gives the API same functionality as GNU parallel's --ungroup
option. As we'll see in a subsequent commit the main reason to want
this is to support stdout and stderr being connected to the TTY in the
case of jobs=1, demonstrated here with GNU parallel:
$ parallel --ungroup 'test -t {} && echo TTY || echo NTTY' ::: 1 2
TTY
TTY
$ parallel 'test -t {} && echo TTY || echo NTTY' ::: 1 2
NTTY
NTTY
Another is as GNU parallel's documentation notes a potential for
optimization. As demonstrated in next commit our results with "git
hook run" will be similar, but generally speaking this shows that if
you want to run processes in parallel where the exact order isn't
important this can be a lot faster:
$ hyperfine -r 3 -L o ,--ungroup 'parallel {o} seq ::: 10000000 >/dev/null '
Benchmark 1: parallel seq ::: 10000000 >/dev/null
Time (mean ± σ): 220.2 ms ± 9.3 ms [User: 124.9 ms, System: 96.1 ms]
Range (min … max): 212.3 ms … 230.5 ms 3 runs
Benchmark 2: parallel --ungroup seq ::: 10000000 >/dev/null
Time (mean ± σ): 154.7 ms ± 0.9 ms [User: 136.2 ms, System: 25.1 ms]
Range (min … max): 153.9 ms … 155.7 ms 3 runs
Summary
'parallel --ungroup seq ::: 10000000 >/dev/null ' ran
1.42 ± 0.06 times faster than 'parallel seq ::: 10000000 >/dev/null '
A large part of the juggling in the API is to make the API safer for
its maintenance and consumers alike.
For the maintenance of the API we e.g. avoid malloc()-ing the
"pp->pfd", ensuring that SANITIZE=address and other similar tools will
catch any unexpected misuse.
For API consumers we take pains to never pass the non-NULL "out"
buffer to an API user that provided the "ungroup" option. The
resulting code in t/helper/test-run-command.c isn't typical of such a
user, i.e. they'd typically use one mode or the other, and would know
whether they'd provided "ungroup" or not.
We could also avoid the strbuf_init() for "buffered_output" by having
"struct parallel_processes" use a static PARALLEL_PROCESSES_INIT
initializer, but let's leave that cleanup for later.
Using a global "run_processes_parallel_ungroup" variable to enable
this option is rather nasty, but is being done here to produce as
minimal of a change as possible for a subsequent regression fix. This
change is extracted from a larger initial version[1] which ends up
with a better end-state for the API, but in doing so needed to modify
all existing callers of the API. Let's defer that for now, and
narrowly focus on what we need for fixing the regression in the
subsequent commit.
It's safe to do this with a global variable because:
A) hook.c is the only user of it that sets it to non-zero, and before
we'll get any other API users we'll refactor away this method of
passing in the option, i.e. re-roll [1].
B) Even if hook.c wasn't the only user we don't have callers of this
API that concurrently invoke this parallel process starting API
itself in parallel.
As noted above "A" && "B" are rather nasty, and we don't want to live
with those caveats long-term, but for now they should be an acceptable
compromise.
1. https://lore.kernel.org/git/cover-v2-0.8-00000000000-20220518T195858Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-06-07 08:48:19 +00:00
|
|
|
strbuf_addbuf(&pp->buffered_output, &pp->children[i].err);
|
|
|
|
strbuf_reset(&pp->children[i].err);
|
|
|
|
}
|
2015-12-16 00:04:10 +00:00
|
|
|
if (code)
|
|
|
|
pp->shutdown = 1;
|
|
|
|
return code;
|
|
|
|
}
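A hedged sketch of a start_failure callback matching the call above; as with get_next_task, the output strbuf is NULL under ungroup, and a non-zero return asks for an early shutdown (all example_* names are illustrative):

static int example_start_failure(struct strbuf *out, void *pp_cb,
				 void *pp_task_cb)
{
	const char *cmd = pp_task_cb; /* hypothetical: stored by get_next_task */

	if (out) /* NULL when the "ungroup" option is in effect */
		strbuf_addf(out, "could not start '%s', skipping it\n", cmd);
	return 0; /* keep the remaining tasks running; pp_cb is unused here */
}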
|
|
|
|
|
|
|
|
pp->nr_processes++;
|
|
|
|
pp->children[i].state = GIT_CP_WORKING;
|
run-command: add an "ungroup" option to run_process_parallel()
Extend the parallel execution API added in c553c72eed6 (run-command:
add an asynchronous parallel child processor, 2015-12-15) to support a
mode where the stdout and stderr of the processes isn't captured and
output in a deterministic order, instead we'll leave it to the kernel
and stdio to sort it out.
This gives the API same functionality as GNU parallel's --ungroup
option. As we'll see in a subsequent commit the main reason to want
this is to support stdout and stderr being connected to the TTY in the
case of jobs=1, demonstrated here with GNU parallel:
$ parallel --ungroup 'test -t {} && echo TTY || echo NTTY' ::: 1 2
TTY
TTY
$ parallel 'test -t {} && echo TTY || echo NTTY' ::: 1 2
NTTY
NTTY
Another is as GNU parallel's documentation notes a potential for
optimization. As demonstrated in next commit our results with "git
hook run" will be similar, but generally speaking this shows that if
you want to run processes in parallel where the exact order isn't
important this can be a lot faster:
$ hyperfine -r 3 -L o ,--ungroup 'parallel {o} seq ::: 10000000 >/dev/null '
Benchmark 1: parallel seq ::: 10000000 >/dev/null
Time (mean ± σ): 220.2 ms ± 9.3 ms [User: 124.9 ms, System: 96.1 ms]
Range (min … max): 212.3 ms … 230.5 ms 3 runs
Benchmark 2: parallel --ungroup seq ::: 10000000 >/dev/null
Time (mean ± σ): 154.7 ms ± 0.9 ms [User: 136.2 ms, System: 25.1 ms]
Range (min … max): 153.9 ms … 155.7 ms 3 runs
Summary
'parallel --ungroup seq ::: 10000000 >/dev/null ' ran
1.42 ± 0.06 times faster than 'parallel seq ::: 10000000 >/dev/null '
A large part of the juggling in the API is to make the API safer for
its maintenance and consumers alike.
For the maintenance of the API we e.g. avoid malloc()-ing the
"pp->pfd", ensuring that SANITIZE=address and other similar tools will
catch any unexpected misuse.
For API consumers we take pains to never pass the non-NULL "out"
buffer to an API user that provided the "ungroup" option. The
resulting code in t/helper/test-run-command.c isn't typical of such a
user, i.e. they'd typically use one mode or the other, and would know
whether they'd provided "ungroup" or not.
We could also avoid the strbuf_init() for "buffered_output" by having
"struct parallel_processes" use a static PARALLEL_PROCESSES_INIT
initializer, but let's leave that cleanup for later.
Using a global "run_processes_parallel_ungroup" variable to enable
this option is rather nasty, but is being done here to produce as
minimal of a change as possible for a subsequent regression fix. This
change is extracted from a larger initial version[1] which ends up
with a better end-state for the API, but in doing so needed to modify
all existing callers of the API. Let's defer that for now, and
narrowly focus on what we need for fixing the regression in the
subsequent commit.
It's safe to do this with a global variable because:
A) hook.c is the only user of it that sets it to non-zero, and before
we'll get any other API users we'll refactor away this method of
passing in the option, i.e. re-roll [1].
B) Even if hook.c wasn't the only user we don't have callers of this
API that concurrently invoke this parallel process starting API
itself in parallel.
As noted above "A" && "B" are rather nasty, and we don't want to live
with those caveats long-term, but for now they should be an acceptable
compromise.
1. https://lore.kernel.org/git/cover-v2-0.8-00000000000-20220518T195858Z-avarab@gmail.com/
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-06-07 08:48:19 +00:00
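The "ungroup" behaviour described above boils down to how the child's stderr is set up. The following is a sketch only, not the actual pp_start_one(); it assumes the child_process convention that setting .err to -1 asks start_command() to create a stderr pipe while 0 leaves stderr connected to the parent's, and the helper name is made up:

/* Illustrative sketch, not part of run-command.c. */
static void sketch_setup_child_output(struct child_process *cp, int ungroup)
{
        if (ungroup) {
                /* let the kernel/stdio interleave output as it happens */
                cp->err = 0;
        } else {
                /* ask for a pipe; the parent buffers and replays in order */
                cp->err = -1;
        }
}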
	if (pp->pfd)
		pp->pfd[i].fd = pp->children[i].process.err;
2015-12-16 00:04:10 +00:00
	return 0;
}

2022-10-12 21:02:33 +00:00
static void pp_buffer_stderr(struct parallel_processes *pp,
			     const struct run_process_parallel_opts *opts,
			     int output_timeout)
{
2023-02-08 19:21:11 +00:00
	while (poll(pp->pfd, opts->processes, output_timeout) < 0) {
		if (errno == EINTR)
			continue;
2022-10-12 21:02:33 +00:00
		pp_cleanup(pp, opts);
		die_errno("poll");
	}

	/* Buffer output from all pipes. */
2022-10-12 21:02:33 +00:00
	for (size_t i = 0; i < opts->processes; i++) {
		if (pp->children[i].state == GIT_CP_WORKING &&
		    pp->pfd[i].revents & (POLLIN | POLLHUP)) {
			int n = strbuf_read_once(&pp->children[i].err,
						 pp->children[i].process.err, 0);
			if (n == 0) {
				close(pp->children[i].process.err);
				pp->children[i].state = GIT_CP_WAIT_CLEANUP;
			} else if (n < 0)
				if (errno != EAGAIN)
					die_errno("read");
		}
	}
}

2022-10-12 21:02:28 +00:00
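pp_output() below and pp_collect_finished() further down implement the "output owner" scheme described in the parallel-processor commit message: only one child's stderr is streamed live, everything else is buffered and replayed once ownership rotates. A conceptual sketch of that choice, using only strbuf helpers that already appear in this file (the function name and flat layout are illustrative, not the real data structures):

/* Illustrative sketch, not part of run-command.c. */
static void sketch_flush_child(struct strbuf *child_err, int is_output_owner,
			       struct strbuf *buffered_output)
{
	if (is_output_owner)
		strbuf_write(child_err, stderr);	   /* show progress live */
	else
		strbuf_addbuf(buffered_output, child_err); /* replay later, in order */
	strbuf_reset(child_err);
}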
static void pp_output(const struct parallel_processes *pp)
{
run-command API: make "n" parameter a "size_t"
Make the "n" variable added in c553c72eed6 (run-command: add an
asynchronous parallel child processor, 2015-12-15) a "size_t". As
we'll see in a subsequent commit we do pass "0" here, but never "jobs
< 0".
We could have made it an "unsigned int", but as we're having to change
this let's not leave another case in the codebase where a size_t and
"unsigned int" size differ on some platforms. In this case it's likely
to never matter, but it's easier to not need to worry about it.
After this and preceding changes:
make run-command.o DEVOPTS=extra-all CFLAGS=-Wno-unused-parameter
Only has one (and new) -Wsigned-compare warning relevant to a
comparison about our "n" or "{nr,max}_processes": About using our
"n" (size_t) in the same expression as online_cpus() (int). A
subsequent commit will adjust & deal with online_cpus() and that
warning.
The only users of the "n" parameter are:
* builtin/fetch.c: defaults to 1, reads from the "fetch.parallel"
config. As seen in the code that parses the config added in
d54dea77dba (fetch: let --jobs=<n> parallelize --multiple, too,
2019-10-05) will die if the git_config_int() return value is < 0.
It will however pass us n = 0, as we'll see in a subsequent commit.
* submodule.c: defaults to 1, reads from "submodule.fetchJobs"
config. Read via code originally added in a028a1930c6 (fetching
submodules: respect `submodule.fetchJobs` config option, 2016-02-29).
It now piggy-backs on the submodule.fetchJobs code and
validation added in f20e7c1ea24 (submodule: remove
submodule.fetchjobs from submodule-config parsing, 2017-08-02).
Like builtin/fetch.c it will die if the git_config_int() return
value is < 0, but like builtin/fetch.c it will pass us n = 0.
* builtin/submodule--helper.c: defaults to 1. Read via code
originally added in 2335b870fa7 (submodule update: expose parallelism
to the user, 2016-02-29).
Since f20e7c1ea24 (submodule: remove submodule.fetchjobs from
submodule-config parsing, 2017-08-02) it shares a config parser and
semantics with the submodule.c caller.
* hook.c: hardcoded to 1, see 96e7225b310 (hook: add 'run'
subcommand, 2021-12-22).
* t/helper/test-run-command.c: can be -1 after parsing the arguments,
but will then be overridden to online_cpus() before passing it to
this API. See be5d88e1128 (test-tool run-command: learn to run (parts
of) the testsuite, 2019-10-04).
Signed-off-by: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2022-10-12 21:02:23 +00:00
	size_t i = pp->output_owner;

2015-12-16 00:04:10 +00:00
	if (pp->children[i].state == GIT_CP_WORKING &&
	    pp->children[i].err.len) {
2016-03-01 02:07:15 +00:00
		strbuf_write(&pp->children[i].err, stderr);
		strbuf_reset(&pp->children[i].err);
	}
}

2022-10-12 21:02:29 +00:00
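pp_collect_finished() below hands each child's exit code to the caller-supplied task_finished callback. Here is a sketch of what such a callback can look like; the function and parameter names are assumptions, only the argument order is taken from the call site, and with "ungroup" the strbuf argument is NULL:

/* Illustrative sketch, not part of run-command.c. */
static int sketch_task_finished(int result, struct strbuf *err,
				void *pp_cb, void *pp_task_cb)
{
	if (!result)
		return 0;
	if (err)	/* grouped mode: queue the message with the child's output */
		strbuf_addf(err, "task exited with status %d\n", result);
	else		/* ungroup mode: no buffer, write directly */
		fprintf(stderr, "task exited with status %d\n", result);
	/*
	 * As pp_collect_finished() shows, a non-zero return is propagated
	 * as the overall result and a negative one stops collecting early.
	 */
	return result;
}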
static int pp_collect_finished(struct parallel_processes *pp,
			       const struct run_process_parallel_opts *opts)
{
2022-10-12 21:02:23 +00:00
	int code;
2022-10-12 21:02:32 +00:00
	size_t i;
	int result = 0;

	while (pp->nr_processes > 0) {
2022-10-12 21:02:32 +00:00
		for (i = 0; i < opts->processes; i++)
			if (pp->children[i].state == GIT_CP_WAIT_CLEANUP)
				break;
2022-10-12 21:02:32 +00:00
		if (i == opts->processes)
			break;

		code = finish_command(&pp->children[i].process);

2022-10-12 21:02:29 +00:00
		if (opts->task_finished)
			code = opts->task_finished(code, opts->ungroup ? NULL :
						   &pp->children[i].err, opts->data,
						   pp->children[i].data);
		else
			code = 0;

		if (code)
			result = code;
		if (code < 0)
			break;

		pp->nr_processes--;
		pp->children[i].state = GIT_CP_FREE;
2022-06-07 08:48:19 +00:00
		if (pp->pfd)
			pp->pfd[i].fd = -1;
		child_process_init(&pp->children[i].process);

2022-10-12 21:02:30 +00:00
		if (opts->ungroup) {
2022-06-07 08:48:19 +00:00
			; /* no strbuf_*() work to do here */
		} else if (i != pp->output_owner) {
			strbuf_addbuf(&pp->buffered_output, &pp->children[i].err);
			strbuf_reset(&pp->children[i].err);
		} else {
			const size_t n = opts->processes;

			strbuf_write(&pp->children[i].err, stderr);
			strbuf_reset(&pp->children[i].err);

			/* Output all other finished child processes */
			strbuf_write(&pp->buffered_output, stderr);
			strbuf_reset(&pp->buffered_output);

			/*
			 * Pick next process to output live.
			 * NEEDSWORK:
			 * For now we pick it randomly by doing a round
			 * robin. Later we may want to pick the one with
			 * the most output or the longest or shortest
			 * running process time.
			 */
			for (i = 0; i < n; i++)
				if (pp->children[(pp->output_owner + i) % n].state == GIT_CP_WORKING)
					break;
			pp->output_owner = (pp->output_owner + i) % n;
		}
	}
	return result;
}
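Read in isolation, the round-robin owner selection at the end of the loop above amounts to the following small helper. This is an illustrative sketch, not code from run-command.c, with a plain array of flags standing in for the children[].state checks.

/*
 * Illustrative sketch, not part of run-command.c: starting at the
 * current owner, take the first slot that is still working; if none
 * is, the owner stays where it is.
 */
static size_t example_next_output_owner(const int *working, size_t n,
					size_t owner)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (working[(owner + i) % n])
			break;
	return (owner + i) % n;
}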

void run_processes_parallel(const struct run_process_parallel_opts *opts)
{
	int i, code;
	int output_timeout = 100;
	int spawn_cap = 4;
	struct parallel_processes_for_signal pp_sig;
	struct parallel_processes pp = {
		.buffered_output = STRBUF_INIT,
	};
	/* options */
	const char *tr2_category = opts->tr2_category;
	const char *tr2_label = opts->tr2_label;
	const int do_trace2 = tr2_category && tr2_label;

	if (do_trace2)
		trace2_region_enter_printf(tr2_category, tr2_label, NULL,
					   "max:%d", opts->processes);

	pp_init(&pp, opts, &pp_sig);
	while (1) {
		for (i = 0;
		     i < spawn_cap && !pp.shutdown &&
		     pp.nr_processes < opts->processes;
		     i++) {
			code = pp_start_one(&pp, opts);
			if (!code)
				continue;
			if (code < 0) {
				pp.shutdown = 1;
				kill_children(&pp, opts, -code);
			}
			break;
		}

		if (!pp.nr_processes)
			break;

		if (opts->ungroup) {
			for (size_t i = 0; i < opts->processes; i++)
				pp.children[i].state = GIT_CP_WAIT_CLEANUP;
		} else {
			pp_buffer_stderr(&pp, opts, output_timeout);
			pp_output(&pp);
		}
		code = pp_collect_finished(&pp, opts);
		if (code) {
			pp.shutdown = 1;
			if (code < 0)
				kill_children(&pp, opts, -code);
		}
	}

	pp_cleanup(&pp, opts);

	if (do_trace2)
		trace2_region_leave(tr2_category, tr2_label, NULL);
}
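For reference, here is a minimal sketch of a run_processes_parallel() caller. Everything prefixed example_ is hypothetical; the callback signatures are assumed to match the get_next_task_fn and task_finished_fn typedefs declared in run-command.h, and the callbacks check for a NULL "out" so that they also work in the ungroup mode shown earlier.

/*
 * Illustrative sketch, not part of run-command.c: run "git --version"
 * a few times with at most two children at once. Only the opts struct
 * and run_processes_parallel() itself come from this API.
 */
struct example_state {
	int remaining;
};

static int example_next_task(struct child_process *cp, struct strbuf *out,
			     void *pp_cb, void **pp_task_cb)
{
	struct example_state *state = pp_cb;

	if (state->remaining <= 0)
		return 0;	/* no more tasks to hand out */
	state->remaining--;

	cp->git_cmd = 1;
	strvec_push(&cp->args, "--version");
	if (out)	/* NULL when the caller asked for ungroup */
		strbuf_addstr(out, "starting a child\n");
	*pp_task_cb = NULL;
	return 1;
}

static int example_task_finished(int result, struct strbuf *out,
				 void *pp_cb, void *pp_task_cb)
{
	if (out)
		strbuf_addf(out, "child exited with %d\n", result);
	return 0;	/* keep going */
}

static void example_run_grouped(void)
{
	struct example_state state = { .remaining = 4 };
	const struct run_process_parallel_opts opts = {
		.processes = 2,
		.data = &state,
		.get_next_task = example_next_task,
		.task_finished = example_task_finished,
	};

	run_processes_parallel(&opts);
}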
auto-gc: extract a reusable helper from "git fetch"
Back in 1991006c (fetch: convert argv_gc_auto to struct argv_array,
2014-08-16), we taught "git fetch --quiet" to pass the "--quiet"
option down to "gc --auto". This issue, however, is not limited to
"fetch":
$ git grep -e 'gc.*--auto' \*.c
finds hits in "am", "commit", "merge", and "rebase" and these
commands do not pass "--quiet" down to "gc --auto" when they
themselves are told to be quiet.
As a preparatory step, let's introduce a helper function
run_auto_gc(), that the caller can pass a boolean "quiet",
and redo the fix to "git fetch" using the helper.
Signed-off-by: Junio C Hamano <gitster@pobox.com>
Reviewed-by: Taylor Blau <me@ttaylorr.com>
Signed-off-by: Junio C Hamano <gitster@pobox.com>
2020-05-06 20:18:29 +00:00

int run_auto_maintenance(int quiet)
{
	int enabled;
	struct child_process maint = CHILD_PROCESS_INIT;

	if (!git_config_get_bool("maintenance.auto", &enabled) &&
	    !enabled)
		return 0;

	maint.git_cmd = 1;
	maint.close_object_store = 1;
	strvec_pushl(&maint.args, "maintenance", "run", "--auto", NULL);
	strvec_push(&maint.args, quiet ? "--quiet" : "--no-quiet");

	return run_command(&maint);
}
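A typical caller simply invokes this helper on its way out. The snippet below is a hedged sketch; the surrounding command is hypothetical, and only run_auto_maintenance() and warning() come from existing code.

/*
 * Illustrative sketch, not part of run-command.c: kick off
 * "git maintenance run --auto" before exiting, honouring the
 * maintenance.auto config exactly as the helper above does.
 */
static void example_finish_command(int quiet)
{
	if (run_auto_maintenance(quiet))
		warning("automatic maintenance returned an error");
}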

void prepare_other_repo_env(struct strvec *env, const char *new_git_dir)
{
	const char * const *var;

	for (var = local_repo_env; *var; var++) {
		if (strcmp(*var, CONFIG_DATA_ENVIRONMENT) &&
		    strcmp(*var, CONFIG_COUNT_ENVIRONMENT))
			strvec_push(env, *var);
	}
	strvec_pushf(env, "%s=%s", GIT_DIR_ENVIRONMENT, new_git_dir);
}
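A sketch of how a caller might use this to run a git command against another repository without leaking this process's repository-specific environment; the child_process field names (env, dir, args) are assumed from run-command.h, everything else is hypothetical.

/*
 * Illustrative sketch, not part of run-command.c: run "git status"
 * in another repository with a scrubbed environment.
 */
static int example_status_of_other_repo(const char *other_git_dir,
					const char *other_work_tree)
{
	struct child_process cp = CHILD_PROCESS_INIT;

	prepare_other_repo_env(&cp.env, other_git_dir);
	cp.git_cmd = 1;
	cp.dir = other_work_tree;
	strvec_pushl(&cp.args, "status", "--porcelain", NULL);
	return run_command(&cp);
}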

enum start_bg_result start_bg_command(struct child_process *cmd,
				      start_bg_wait_cb *wait_cb,
				      void *cb_data,
				      unsigned int timeout_sec)
{
	enum start_bg_result sbgr = SBGR_ERROR;
	int ret;
	int wait_status;
	pid_t pid_seen;
	time_t time_limit;

	/*
	 * We do not allow clean-on-exit because the child process
	 * should persist in the background and possibly/probably
	 * after this process exits. So we don't want to kill the
	 * child during our atexit routine.
	 */
	if (cmd->clean_on_exit)
		BUG("start_bg_command() does not allow non-zero clean_on_exit");

	if (!cmd->trace2_child_class)
		cmd->trace2_child_class = "background";

	ret = start_command(cmd);
	if (ret) {
		/*
		 * We assume that if `start_command()` fails, we
		 * either get a complete `trace2_child_start() /
		 * trace2_child_exit()` pair or it fails before the
		 * `trace2_child_start()` is emitted, so we do not
		 * need to worry about it here.
		 *
		 * We also assume that `start_command()` does not add
		 * us to the cleanup list. And that it calls
		 * `child_process_clear()`.
		 */
		sbgr = SBGR_ERROR;
		goto done;
	}

	time(&time_limit);
	time_limit += timeout_sec;

wait:
	pid_seen = waitpid(cmd->pid, &wait_status, WNOHANG);

	if (!pid_seen) {
		/*
		 * The child is currently running. Ask the callback
		 * if the child is ready to do work or whether we
		 * should keep waiting for it to boot up.
		 */
		ret = (*wait_cb)(cmd, cb_data);
		if (!ret) {
			/*
			 * The child is running and "ready".
			 */
			trace2_child_ready(cmd, "ready");
			sbgr = SBGR_READY;
			goto done;
		} else if (ret > 0) {
			/*
			 * The callback said to give it more time to boot up
			 * (subject to our timeout limit).
			 */
			time_t now;

			time(&now);
			if (now < time_limit)
				goto wait;

			/*
			 * Our timeout has expired. We don't try to
			 * kill the child, but rather let it continue
			 * (hopefully) trying to startup.
			 */
			trace2_child_ready(cmd, "timeout");
			sbgr = SBGR_TIMEOUT;
			goto done;
		} else {
			/*
			 * The cb gave up on this child. It is still running,
			 * but our cb got an error trying to probe it.
			 */
			trace2_child_ready(cmd, "error");
			sbgr = SBGR_CB_ERROR;
			goto done;
		}
	}

	else if (pid_seen == cmd->pid) {
		int child_code = -1;

		/*
		 * The child started, but exited or was terminated
		 * before becoming "ready".
		 *
		 * We try to match the behavior of `wait_or_whine()`
		 * WRT the handling of WIFSIGNALED() and WIFEXITED()
		 * and convert the child's status to a return code for
		 * tracing purposes and emit the `trace2_child_exit()`
		 * event.
		 *
		 * We do not want the wait_or_whine() error message
		 * because we will be called by client-side library
		 * routines.
		 */
		if (WIFEXITED(wait_status))
			child_code = WEXITSTATUS(wait_status);
		else if (WIFSIGNALED(wait_status))
			child_code = WTERMSIG(wait_status) + 128;
		trace2_child_exit(cmd, child_code);

		sbgr = SBGR_DIED;
		goto done;
	}

	else if (pid_seen < 0 && errno == EINTR)
		goto wait;

	trace2_child_exit(cmd, -1);
	sbgr = SBGR_ERROR;

done:
	child_process_clear(cmd);
	invalidate_lstat_cache();

	return sbgr;
}
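To round this off, a sketch of a start_bg_command() caller. The daemon name and readiness probe are hypothetical; the wait callback is assumed to follow the start_bg_wait_cb convention used above, returning 0 when the child is ready, a positive value to keep waiting, and a negative value on error.

/*
 * Illustrative sketch, not part of run-command.c: spawn a
 * hypothetical "git example-daemon" in the background and wait up
 * to ten seconds for it to create its socket.
 */
static int example_daemon_ready(const struct child_process *cmd,
				void *cb_data)
{
	const char *socket_path = cb_data;

	(void)cmd;	/* the probe only needs the socket path */
	return access(socket_path, F_OK) ? 1 /* keep waiting */ : 0 /* ready */;
}

static enum start_bg_result example_spawn_daemon(const char *socket_path)
{
	struct child_process cmd = CHILD_PROCESS_INIT;

	cmd.git_cmd = 1;
	strvec_pushl(&cmd.args, "example-daemon", "--socket", socket_path, NULL);

	return start_bg_command(&cmd, example_daemon_ready,
				(void *)socket_path, 10);
}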