
Merge branch 'sp/clip-read-write-to-8mb'

Send a large request to read(2)/write(2) as smaller but still
reasonably large chunks. This improves latency when the operation
needs to be killed, and incidentally works around broken 64-bit
systems that cannot take a 2GB write or read in one go.

* sp/clip-read-write-to-8mb:
  Revert "compat/clipped-write.c: large write(2) fails on Mac OS X/XNU"
  xread, xwrite: limit size of IO to 8MB
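
The clipping is transparent to callers because xread() and xwrite() have
always been allowed to return short counts, and call sites loop until the
full buffer is transferred. Below is a minimal sketch of that caller-side
pattern, modeled on git's write_in_full() in wrapper.c; the name write_all
and the ENOSPC convention are illustrative assumptions, not code from this
commit.

#include "git-compat-util.h"

/*
 * Sketch of the caller-side loop that makes the 8MB clipping safe:
 * xwrite() may now transfer at most 8MB per call, so we keep calling
 * it until the whole buffer has been written.
 */
static ssize_t write_all(int fd, const void *buf, size_t count)
{
	const char *p = buf;
	ssize_t total = 0;

	while (count > 0) {
		ssize_t written = xwrite(fd, p, count);
		if (written < 0)
			return -1;	/* hard error from write(2) */
		if (written == 0) {
			errno = ENOSPC;	/* nothing accepted; report as full */
			return -1;
		}
		p += written;		/* advance past the bytes written */
		total += written;
		count -= written;
	}
	return total;
}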
Junio C Hamano, 2013-09-09 14:50:39 -07:00
commit a23274e127
6 changed files with 26 additions and 27 deletions

--- a/Makefile
+++ b/Makefile
@@ -69,9 +69,6 @@ all::
 # Define NO_MSGFMT_EXTENDED_OPTIONS if your implementation of msgfmt
 # doesn't support GNU extensions like --check and --statistics
 #
-# Define NEEDS_CLIPPED_WRITE if your write(2) cannot write more than
-# INT_MAX bytes at once (e.g. MacOS X).
-#
 # Define HAVE_PATHS_H if you have paths.h and want to use the default PATH
 # it specifies.
 #
@@ -1499,11 +1496,6 @@ ifndef NO_MSGFMT_EXTENDED_OPTIONS
 	MSGFMT += --check --statistics
 endif
 
-ifdef NEEDS_CLIPPED_WRITE
-	BASIC_CFLAGS += -DNEEDS_CLIPPED_WRITE
-	COMPAT_OBJS += compat/clipped-write.o
-endif
-
 ifneq (,$(XDL_FAST_HASH))
 	BASIC_CFLAGS += -DXDL_FAST_HASH
 endif

--- a/compat/clipped-write.c
+++ /dev/null
@@ -1,13 +0,0 @@
-#include "../git-compat-util.h"
-#undef write
-
-/*
- * Version of write that will write at most INT_MAX bytes.
- * Workaround a xnu bug on Mac OS X
- */
-ssize_t clipped_write(int fildes, const void *buf, size_t nbyte)
-{
-	if (nbyte > INT_MAX)
-		nbyte = INT_MAX;
-	return write(fildes, buf, nbyte);
-}

--- a/config.mak.uname
+++ b/config.mak.uname
@@ -95,7 +95,6 @@ ifeq ($(uname_S),Darwin)
 	NO_MEMMEM = YesPlease
 	USE_ST_TIMESPEC = YesPlease
 	HAVE_DEV_TTY = YesPlease
-	NEEDS_CLIPPED_WRITE = YesPlease
 	COMPAT_OBJS += compat/precompose_utf8.o
 	BASIC_CFLAGS += -DPRECOMPOSE_UNICODE
 endif

--- a/git-compat-util.h
+++ b/git-compat-util.h
@@ -185,11 +185,6 @@ typedef unsigned long uintptr_t;
 #define probe_utf8_pathname_composition(a,b)
 #endif
 
-#ifdef NEEDS_CLIPPED_WRITE
-ssize_t clipped_write(int fildes, const void *buf, size_t nbyte);
-#define write(x,y,z) clipped_write((x),(y),(z))
-#endif
-
 #ifdef MKDIR_WO_TRAILING_SLASH
 #define mkdir(a,b) compat_mkdir_wo_trailing_slash((a),(b))
 extern int compat_mkdir_wo_trailing_slash(const char*, mode_t);

--- a/t/t0021-conversion.sh
+++ b/t/t0021-conversion.sh
@@ -190,4 +190,18 @@ test_expect_success 'required filter clean failure' '
 	test_must_fail git add test.fc
 '
 
+test -n "$GIT_TEST_LONG" && test_set_prereq EXPENSIVE
+
+test_expect_success EXPENSIVE 'filter large file' '
+	git config filter.largefile.smudge cat &&
+	git config filter.largefile.clean cat &&
+	for i in $(test_seq 1 2048); do printf "%1048576d" 1; done >2GB &&
+	echo "2GB filter=largefile" >.gitattributes &&
+	git add 2GB 2>err &&
+	! test -s err &&
+	rm -f 2GB &&
+	git checkout -- 2GB 2>err &&
+	! test -s err
+'
+
 test_done
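
The size arithmetic behind the new test is worth spelling out: each
printf "%1048576d" emits exactly 1 MiB, so 2048 iterations produce
2048 * 1048576 = 2,147,483,648 bytes, one byte past INT_MAX, pushing
more than INT_MAX bytes through the clean and smudge filters. A
standalone check of that arithmetic (illustrative, not part of the
commit):

#include <assert.h>
#include <limits.h>
#include <stdint.h>

int main(void)
{
	/* 2048 chunks, each printf "%1048576d" emitting 1 MiB */
	uint64_t total = 2048ULL * 1048576ULL;

	assert(total == 2147483648ULL);		/* exactly 2 GiB */
	assert(total > (uint64_t)INT_MAX);	/* past the limit XNU's write(2) accepts */
	return 0;
}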

--- a/wrapper.c
+++ b/wrapper.c
@@ -130,6 +130,14 @@ void *xcalloc(size_t nmemb, size_t size)
 	return ret;
 }
 
+/*
+ * Limit size of IO chunks, because huge chunks only cause pain. OS X
+ * 64-bit is buggy, returning EINVAL if len >= INT_MAX; and even in
+ * the absence of bugs, large chunks can result in bad latencies when
+ * you decide to kill the process.
+ */
+#define MAX_IO_SIZE (8*1024*1024)
+
 /*
  * xread() is the same as read(), but it automatically restarts read()
  * operations with a recoverable error (EAGAIN and EINTR). xread()
@@ -138,6 +146,8 @@ void *xcalloc(size_t nmemb, size_t size)
 ssize_t xread(int fd, void *buf, size_t len)
 {
 	ssize_t nr;
+	if (len > MAX_IO_SIZE)
+		len = MAX_IO_SIZE;
 	while (1) {
 		nr = read(fd, buf, len);
 		if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
@@ -154,6 +164,8 @@ ssize_t xread(int fd, void *buf, size_t len)
 ssize_t xwrite(int fd, const void *buf, size_t len)
 {
 	ssize_t nr;
+	if (len > MAX_IO_SIZE)
+		len = MAX_IO_SIZE;
 	while (1) {
 		nr = write(fd, buf, len);
 		if ((nr < 0) && (errno == EAGAIN || errno == EINTR))
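
For a rough sense of the latency argument in the MAX_IO_SIZE comment
(the ~100 MB/s throughput below is an assumed figure for illustration,
not from the commit): an 8MB chunk keeps each syscall around 80 ms, so
a process stuck in I/O can be killed almost immediately, while a single
2GB request could pin it in the kernel for roughly 20 seconds.

#include <stdio.h>

int main(void)
{
	const double mb_per_sec = 100.0;	/* assumed sustained throughput */
	const double chunk_mb = 8.0;		/* MAX_IO_SIZE */
	const double huge_mb = 2048.0;		/* one 2GB request */

	printf("8MB chunk: ~%.0f ms per syscall\n",
	       1000.0 * chunk_mb / mb_per_sec);
	printf("2GB chunk: ~%.1f s per syscall\n",
	       huge_mb / mb_per_sec);
	return 0;
}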