From 62f6ffb6db0f0ea9a5ebc2c4f5357ed4cfc9a519 Mon Sep 17 00:00:00 2001 From: Harshavardhana Date: Wed, 10 Feb 2016 16:40:09 -0800 Subject: [PATCH] xl: Moved to minio/minio - fixes #1112 --- .gitignore | 2 + .travis.yml | 14 +- Makefile | 30 +- accesslog-handler.go | 2 +- api-auth-utils.go | 2 +- api-signature.go | 18 +- appveyor.yml | 28 +- bucket-handlers.go | 9 +- config-logger-main.go | 2 +- damerau-levenshtein.go | 66 ++ httprange.go | 2 +- jwt.go | 2 +- logger-file-hook.go | 2 +- logger-mongo-hook.go | 2 +- logger-syslog-hook_nix.go | 2 +- logger-syslog-hook_windows.go | 2 +- logger.go | 2 +- logger_test.go | 2 +- main.go | 16 +- notifier.go | 2 +- object-handlers.go | 15 +- .../minio-xl/pkg => pkg}/atomic/atomic.go | 2 +- .../pkg => pkg}/atomic/atomic_test.go | 2 +- .../minio-xl/pkg => pkg}/cpu/cpu_amd64.go | 0 .../minio-xl/pkg => pkg}/cpu/cpu_amd64.s | 0 .../minio/minio-xl/pkg => pkg}/cpu/cpu_arm.go | 0 .../minio-xl/pkg => pkg}/cpu/cpu_test.go | 2 +- .../minio/minio-xl/pkg => pkg}/cpu/doc.go | 0 pkg/crypto/sha1/.gitignore | 1 + .../crypto/sha256 => pkg/crypto/sha1}/LICENSE | 0 pkg/crypto/sha1/sha1.go | 150 +++ pkg/crypto/sha1/sha1_linux.S | 967 ++++++++++++++++++ pkg/crypto/sha1/sha1_sse3_amd64.asm | 579 +++++++++++ pkg/crypto/sha1/sha1_test.go | 154 +++ pkg/crypto/sha1/sha1_yasm_darwin.go | 21 + pkg/crypto/sha1/sha1_yasm_linux.go | 21 + pkg/crypto/sha1/sha1_yasm_windows.go | 21 + pkg/crypto/sha1/sha1block.go | 43 + pkg/crypto/sha1/sha1block_generic.go | 110 ++ pkg/crypto/sha1/sha1block_linux.go | 50 + pkg/crypto/sha1/sha1block_nocgo.go | 23 + .../sha512 => pkg/crypto/sha256}/LICENSE | 0 .../sha256/sha256-avx-asm_linux_amd64.S | 0 .../sha256/sha256-avx2-asm_linux_amd64.S | 0 .../sha256/sha256-ssse3-asm_linux_amd64.S | 0 .../crypto/sha256/sha256.go | 36 +- .../crypto/sha256/sha256_linux.go | 56 +- .../crypto/sha256/sha256_test.go | 16 +- .../crypto/sha256/sha256block.go | 2 +- .../crypto/sha512/LICENSE | 0 .../sha512/sha512-avx-asm_linux_amd64.S | 0 .../sha512/sha512-avx2-asm_linux_amd64.S | 0 .../sha512/sha512-ssse3-asm_linux_amd64.S | 0 .../crypto/sha512/sha512.go | 41 +- .../crypto/sha512/sha512_linux.go | 40 +- .../crypto/sha512/sha512_test.go | 16 +- .../crypto/sha512/sha512block.go | 33 +- pkg/erasure/.gitignore | 1 + pkg/erasure/INSTALLGO.md | 64 ++ pkg/erasure/LICENSE.INTEL | 26 + pkg/erasure/LICENSE.MINIO | 202 ++++ pkg/erasure/README.md | 25 + pkg/erasure/ctypes.go | 62 ++ pkg/erasure/doc.go | 66 ++ pkg/erasure/ec_minio_common.h | 39 + pkg/erasure/ec_minio_decode.c | 142 +++ pkg/erasure/ec_minio_encode.c | 55 + pkg/erasure/erasure_decode.go | 123 +++ pkg/erasure/erasure_encode.go | 174 ++++ pkg/erasure/erasure_test.go | 82 ++ pkg/erasure/stdint.go | 38 + pkg/fs/config.go | 4 +- pkg/fs/errors.go | 10 - pkg/fs/fs-bucket-listobjects.go | 2 +- pkg/fs/fs-bucket.go | 2 +- pkg/fs/fs-multipart.go | 17 +- pkg/fs/fs-object.go | 11 +- pkg/fs/fs.go | 2 +- .../pkg => pkg}/minhttp/LICENSE.Facebook | 0 pkg/minhttp/LICENSE.Minio | 202 ++++ .../minio-xl/pkg => pkg}/minhttp/http_nix.go | 2 +- .../pkg => pkg}/minhttp/http_windows.go | 2 +- .../pkg => pkg}/minhttp/kill_windows.go | 2 +- .../minio-xl/pkg => pkg}/minhttp/listen.go | 0 .../minio/minio-xl/pkg => pkg}/minhttp/net.go | 4 +- .../minio/minio-xl/pkg => pkg}/probe/probe.go | 0 .../minio-xl/pkg => pkg}/probe/probe_test.go | 4 +- .../minio-xl/pkg => pkg}/probe/wrapper.go | 0 .../minio-xl/pkg => pkg}/quick/errorutil.go | 0 .../minio/minio-xl/pkg => pkg}/quick/quick.go | 7 +- .../minio-xl/pkg => pkg}/quick/quick_test.go | 2 +- 
pkg/signature/errors.go | 48 + pkg/{fs => signature}/postpolicyform.go | 4 +- .../signature-v4.go} | 9 +- pkg/tasker/commands.go | 44 + pkg/tasker/handle.go | 56 + pkg/tasker/status.go | 35 + pkg/tasker/task.go | 103 ++ pkg/tasker/taskctl.go | 164 +++ pkg/tasker/taskctl_test.go | 38 + pkg/xl/LICENSE | 202 ++++ pkg/xl/README.md | 3 + pkg/xl/acl.go | 47 + pkg/xl/block/block.go | 196 ++++ pkg/xl/block/block_test.go | 83 ++ pkg/xl/bucket.go | 639 ++++++++++++ pkg/xl/cache/data/data.go | 204 ++++ pkg/xl/cache/data/data_test.go | 45 + pkg/xl/cache/metadata/metadata.go | 110 ++ pkg/xl/cache/metadata/metadata_test.go | 46 + pkg/xl/common.go | 190 ++++ pkg/xl/config.go | 80 ++ pkg/xl/definitions.go | 157 +++ pkg/xl/encoder.go | 71 ++ pkg/xl/errors.go | 333 ++++++ pkg/xl/heal.go | 69 ++ pkg/xl/interfaces.go | 72 ++ pkg/xl/management.go | 81 ++ pkg/xl/multipart.go | 514 ++++++++++ pkg/xl/node.go | 76 ++ pkg/xl/xl-metadata.md | 55 + pkg/xl/xl-v1.go | 681 ++++++++++++ pkg/xl/xl-v1_test.go | 290 ++++++ pkg/xl/xl-v2.go | 637 ++++++++++++ pkg/xl/xl-v2_test.go | 265 +++++ routers.go | 2 +- server-config.go | 4 +- server-main.go | 4 +- signature-handler.go | 11 +- signature_utils_test.go | 2 +- update-main.go | 2 +- .../pkg/crypto/sha256/sha256_darwin.go | 56 - .../pkg/crypto/sha256/sha256_windows.go | 56 - .../pkg/crypto/sha512/sha512_darwin.go | 61 -- .../pkg/crypto/sha512/sha512_windows.go | 61 -- vendor/vendor.json | 35 - web-handlers.go | 2 +- 137 files changed, 9408 insertions(+), 515 deletions(-) create mode 100644 damerau-levenshtein.go rename {vendor/github.com/minio/minio-xl/pkg => pkg}/atomic/atomic.go (98%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/atomic/atomic_test.go (96%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/cpu/cpu_amd64.go (100%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/cpu/cpu_amd64.s (100%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/cpu/cpu_arm.go (100%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/cpu/cpu_test.go (97%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/cpu/doc.go (100%) create mode 100644 pkg/crypto/sha1/.gitignore rename {vendor/github.com/minio/minio-xl/pkg/crypto/sha256 => pkg/crypto/sha1}/LICENSE (100%) create mode 100644 pkg/crypto/sha1/sha1.go create mode 100644 pkg/crypto/sha1/sha1_linux.S create mode 100644 pkg/crypto/sha1/sha1_sse3_amd64.asm create mode 100644 pkg/crypto/sha1/sha1_test.go create mode 100644 pkg/crypto/sha1/sha1_yasm_darwin.go create mode 100644 pkg/crypto/sha1/sha1_yasm_linux.go create mode 100644 pkg/crypto/sha1/sha1_yasm_windows.go create mode 100644 pkg/crypto/sha1/sha1block.go create mode 100644 pkg/crypto/sha1/sha1block_generic.go create mode 100644 pkg/crypto/sha1/sha1block_linux.go create mode 100644 pkg/crypto/sha1/sha1block_nocgo.go rename {vendor/github.com/minio/minio-xl/pkg/crypto/sha512 => pkg/crypto/sha256}/LICENSE (100%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/crypto/sha256/sha256-avx-asm_linux_amd64.S (100%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/crypto/sha256/sha256-avx2-asm_linux_amd64.S (100%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/crypto/sha256/sha256-ssse3-asm_linux_amd64.S (100%) rename vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_linux.go => pkg/crypto/sha256/sha256.go (58%) rename vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_linux_amd64.go => pkg/crypto/sha256/sha256_linux.go (80%) rename vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_linux_amd64_test.go => 
pkg/crypto/sha256/sha256_test.go (89%) rename vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256block_linux_amd64.go => pkg/crypto/sha256/sha256block.go (99%) rename vendor/github.com/minio/minio-xl/pkg/minhttp/LICENSE.Minio => pkg/crypto/sha512/LICENSE (100%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/crypto/sha512/sha512-avx-asm_linux_amd64.S (100%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/crypto/sha512/sha512-avx2-asm_linux_amd64.S (100%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/crypto/sha512/sha512-ssse3-asm_linux_amd64.S (100%) rename vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_linux.go => pkg/crypto/sha512/sha512.go (55%) rename vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_linux_amd64.go => pkg/crypto/sha512/sha512_linux.go (86%) rename vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_linux_amd64_test.go => pkg/crypto/sha512/sha512_test.go (92%) rename vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512block_linux_amd64.go => pkg/crypto/sha512/sha512block.go (87%) create mode 100644 pkg/erasure/.gitignore create mode 100644 pkg/erasure/INSTALLGO.md create mode 100644 pkg/erasure/LICENSE.INTEL create mode 100644 pkg/erasure/LICENSE.MINIO create mode 100644 pkg/erasure/README.md create mode 100644 pkg/erasure/ctypes.go create mode 100644 pkg/erasure/doc.go create mode 100644 pkg/erasure/ec_minio_common.h create mode 100644 pkg/erasure/ec_minio_decode.c create mode 100644 pkg/erasure/ec_minio_encode.c create mode 100644 pkg/erasure/erasure_decode.go create mode 100644 pkg/erasure/erasure_encode.go create mode 100644 pkg/erasure/erasure_test.go create mode 100644 pkg/erasure/stdint.go rename {vendor/github.com/minio/minio-xl/pkg => pkg}/minhttp/LICENSE.Facebook (100%) create mode 100644 pkg/minhttp/LICENSE.Minio rename {vendor/github.com/minio/minio-xl/pkg => pkg}/minhttp/http_nix.go (99%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/minhttp/http_windows.go (99%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/minhttp/kill_windows.go (98%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/minhttp/listen.go (100%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/minhttp/net.go (99%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/probe/probe.go (100%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/probe/probe_test.go (93%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/probe/wrapper.go (100%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/quick/errorutil.go (100%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/quick/quick.go (97%) rename {vendor/github.com/minio/minio-xl/pkg => pkg}/quick/quick_test.go (99%) create mode 100644 pkg/signature/errors.go rename pkg/{fs => signature}/postpolicyform.go (98%) rename pkg/{fs/signature.go => signature/signature-v4.go} (98%) create mode 100644 pkg/tasker/commands.go create mode 100644 pkg/tasker/handle.go create mode 100644 pkg/tasker/status.go create mode 100644 pkg/tasker/task.go create mode 100644 pkg/tasker/taskctl.go create mode 100644 pkg/tasker/taskctl_test.go create mode 100644 pkg/xl/LICENSE create mode 100644 pkg/xl/README.md create mode 100644 pkg/xl/acl.go create mode 100644 pkg/xl/block/block.go create mode 100644 pkg/xl/block/block_test.go create mode 100644 pkg/xl/bucket.go create mode 100644 pkg/xl/cache/data/data.go create mode 100644 pkg/xl/cache/data/data_test.go create mode 100644 pkg/xl/cache/metadata/metadata.go create mode 100644 pkg/xl/cache/metadata/metadata_test.go create mode 100644 
pkg/xl/common.go create mode 100644 pkg/xl/config.go create mode 100644 pkg/xl/definitions.go create mode 100644 pkg/xl/encoder.go create mode 100644 pkg/xl/errors.go create mode 100644 pkg/xl/heal.go create mode 100644 pkg/xl/interfaces.go create mode 100644 pkg/xl/management.go create mode 100644 pkg/xl/multipart.go create mode 100644 pkg/xl/node.go create mode 100644 pkg/xl/xl-metadata.md create mode 100644 pkg/xl/xl-v1.go create mode 100644 pkg/xl/xl-v1_test.go create mode 100644 pkg/xl/xl-v2.go create mode 100644 pkg/xl/xl-v2_test.go delete mode 100644 vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_darwin.go delete mode 100644 vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_windows.go delete mode 100644 vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_darwin.go delete mode 100644 vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_windows.go diff --git a/.gitignore b/.gitignore index 0d83a8db8..3eefca6c7 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,5 @@ site/ **/access.log ui-assets.go ui-assets.asc +build +isa-l \ No newline at end of file diff --git a/.travis.yml b/.travis.yml index 7ec870ae0..b96b98026 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,10 +1,21 @@ -sudo: false +sudo: required +dist: trusty language: go os: - linux - osx +before_install: + - git clone https://github.com/yasm/yasm + - cd yasm + - git checkout v1.3.0 + - "./autogen.sh" + - "./configure" + - make + - export PATH=$PATH:`pwd` + - cd .. + osx_image: xcode7.2 env: @@ -16,7 +27,6 @@ script: - make test GOFLAGS="-race" go: -- 1.5.2 - 1.5.3 notifications: diff --git a/Makefile b/Makefile index ee6d6b41e..cf288afc9 100644 --- a/Makefile +++ b/Makefile @@ -48,6 +48,15 @@ fmt: @GO15VENDOREXPERIMENT=1 gofmt -s -l *.go @GO15VENDOREXPERIMENT=1 gofmt -s -l pkg +## Configure Intel library. +isa-l: + @echo "Configuring $@:" + @git clone -q https://github.com/minio/isa-l.git + @mkdir -p build + @cd build; $(PWD)/isa-l/configure --prefix $(PWD)/build/lib --sysconfdir $(PWD)/build/lib --includedir $(PWD)/build/lib --libdir $(PWD)/build/lib >/dev/null + @make -C build >/dev/null + @make -C build install >/dev/null + lint: @echo "Running $@:" @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/golint *.go @@ -58,8 +67,8 @@ cyclo: @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/gocyclo -over 65 *.go @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/gocyclo -over 65 pkg -build: getdeps verifiers $(UI_ASSETS) - @echo "Installing minio:" +build: getdeps verifiers $(UI_ASSETS) isa-l + @GO15VENDOREXPERIMENT=1 go generate ./... deadcode: @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/deadcode @@ -69,12 +78,13 @@ spelling: @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/misspell pkg/**/* test: build - @echo "Running all testing:" - @GO15VENDOREXPERIMENT=1 go test $(GOFLAGS) . - @GO15VENDOREXPERIMENT=1 go test $(GOFLAGS) github.com/minio/minio/pkg... + @echo "Running all minio testing:" + @CGO_CPPFLAGS="-I$(PWD)/build/lib" CGO_LDFLAGS="$(PWD)/build/lib/libisal.a" GO15VENDOREXPERIMENT=1 go test $(GOFLAGS) . + @CGO_CPPFLAGS="-I$(PWD)/build/lib" CGO_LDFLAGS="$(PWD)/build/lib/libisal.a" GO15VENDOREXPERIMENT=1 go test $(GOFLAGS) github.com/minio/minio/pkg... 
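The test recipe above (and gomake-all below) links the Intel ISA-L archive into the Go build purely through CGO_CPPFLAGS and CGO_LDFLAGS, so the cgo sources carry no #cgo directives of their own. A minimal sketch of what that implies for a file in the new pkg/erasure package (an illustration only, not the actual pkg/erasure source; it assumes only the ec_minio_common.h header that this patch adds):

package erasure

// The include path (-I$(PWD)/build/lib) arrives via CGO_CPPFLAGS and the
// static archive ($(PWD)/build/lib/libisal.a) via CGO_LDFLAGS, exactly as
// the Makefile recipes export them, so the source only declares the include.

// #include "ec_minio_common.h"
import "C"

Invoking the toolchain by hand mirrors the recipe: CGO_CPPFLAGS="-I$PWD/build/lib" CGO_LDFLAGS="$PWD/build/lib/libisal.a" GO15VENDOREXPERIMENT=1 go build github.com/minio/minio/pkg/erasure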
gomake-all: build - @GO15VENDOREXPERIMENT=1 go build --ldflags $(BUILD_LDFLAGS) -o $(GOPATH)/bin/minio + @echo "Installing minio:" + @CGO_CPPFLAGS="-I$(PWD)/build/lib" CGO_LDFLAGS="$(PWD)/build/lib/libisal.a" GO15VENDOREXPERIMENT=1 go build --ldflags $(BUILD_LDFLAGS) -o $(GOPATH)/bin/minio pkg-add: @GO15VENDOREXPERIMENT=1 ${GOPATH}/bin/govendor add $(PKG) @@ -100,9 +110,9 @@ release: clean: @echo "Cleaning up all the generated files:" - @rm -fv cover.out - @rm -fv minio - @rm -fv minio.test - @rm -fv pkg/fs/fs.test + @rm -fv minio minio.test cover.out + @find . -name '*.test' | xargs rm -fv @rm -fv ui-assets.go @rm -fv ui-assets.asc + @rm -rf isa-l + @rm -rf build diff --git a/accesslog-handler.go b/accesslog-handler.go index cb28b39f6..7a0211009 100644 --- a/accesslog-handler.go +++ b/accesslog-handler.go @@ -23,7 +23,7 @@ import ( "os" "time" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/probe" ) type accessLogHandler struct { diff --git a/api-auth-utils.go b/api-auth-utils.go index 1e67e62b7..cd98fa8b8 100644 --- a/api-auth-utils.go +++ b/api-auth-utils.go @@ -21,7 +21,7 @@ import ( "encoding/base64" "regexp" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/probe" ) const ( diff --git a/api-signature.go b/api-signature.go index b3250665a..c178667b0 100644 --- a/api-signature.go +++ b/api-signature.go @@ -26,8 +26,8 @@ import ( "strings" "time" - "github.com/minio/minio-xl/pkg/probe" - "github.com/minio/minio/pkg/fs" + "github.com/minio/minio/pkg/probe" + v4 "github.com/minio/minio/pkg/signature" ) const ( @@ -125,7 +125,7 @@ func stripAccessKeyID(authHeaderValue string) (string, *probe.Error) { } // initSignatureV4 initializing signature verification. -func initSignatureV4(req *http.Request) (*fs.Signature, *probe.Error) { +func initSignatureV4(req *http.Request) (*v4.Signature, *probe.Error) { // strip auth from authorization header. 
authHeaderValue := req.Header.Get("Authorization") @@ -156,7 +156,7 @@ func initSignatureV4(req *http.Request) (*fs.Signature, *probe.Error) { return nil, err.Trace(authHeaderValue) } if config.Credentials.AccessKeyID == accessKeyID { - signature := &fs.Signature{ + signature := &v4.Signature{ AccessKeyID: config.Credentials.AccessKeyID, SecretAccessKey: config.Credentials.SecretAccessKey, Region: region, @@ -203,7 +203,7 @@ func applyPolicy(formValues map[string]string) *probe.Error { if e != nil { return probe.NewError(e) } - postPolicyForm, err := fs.ParsePostPolicyForm(string(policyBytes)) + postPolicyForm, err := v4.ParsePostPolicyForm(string(policyBytes)) if err != nil { return err.Trace() } @@ -244,7 +244,7 @@ func applyPolicy(formValues map[string]string) *probe.Error { } // initPostPresignedPolicyV4 initializing post policy signature verification -func initPostPresignedPolicyV4(formValues map[string]string) (*fs.Signature, *probe.Error) { +func initPostPresignedPolicyV4(formValues map[string]string) (*v4.Signature, *probe.Error) { credentialElements := strings.Split(strings.TrimSpace(formValues["X-Amz-Credential"]), "/") if len(credentialElements) != 5 { return nil, probe.NewError(errCredentialTagMalformed) @@ -259,7 +259,7 @@ func initPostPresignedPolicyV4(formValues map[string]string) (*fs.Signature, *pr } region := credentialElements[2] if config.Credentials.AccessKeyID == accessKeyID { - signature := &fs.Signature{ + signature := &v4.Signature{ AccessKeyID: config.Credentials.AccessKeyID, SecretAccessKey: config.Credentials.SecretAccessKey, Region: region, @@ -272,7 +272,7 @@ func initPostPresignedPolicyV4(formValues map[string]string) (*fs.Signature, *pr } // initPresignedSignatureV4 initializing presigned signature verification -func initPresignedSignatureV4(req *http.Request) (*fs.Signature, *probe.Error) { +func initPresignedSignatureV4(req *http.Request) (*v4.Signature, *probe.Error) { credentialElements := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-Credential")), "/") if len(credentialElements) != 5 { return nil, probe.NewError(errCredentialTagMalformed) @@ -289,7 +289,7 @@ func initPresignedSignatureV4(req *http.Request) (*fs.Signature, *probe.Error) { signedHeaders := strings.Split(strings.TrimSpace(req.URL.Query().Get("X-Amz-SignedHeaders")), ";") signature := strings.TrimSpace(req.URL.Query().Get("X-Amz-Signature")) if config.Credentials.AccessKeyID == accessKeyID { - signature := &fs.Signature{ + signature := &v4.Signature{ AccessKeyID: config.Credentials.AccessKeyID, SecretAccessKey: config.Credentials.SecretAccessKey, Region: region, diff --git a/appveyor.yml b/appveyor.yml index e1e5fd872..24fa4a07c 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,30 +1,46 @@ -# version format -version: "{build}" - # Operating system (build VM template) -os: Windows Server 2012 R2 +os: Visual Studio 2015 + +platform: x64 clone_folder: c:\gopath\src\github.com\minio\minio # environment variables environment: GOPATH: c:\gopath + GO_EXTLINK_ENABLED: 0 GO15VENDOREXPERIMENT: 1 UI_ASSETS: ui-assets.go UI_ASSETS_ARMOR: ui-assets.asc # scripts that run after cloning repository install: + - '"C:\Program Files\Microsoft SDKs\Windows\v7.1\Bin\SetEnv.cmd" /x64' + - curl -fsSL -o c:\go\bin\yasm.exe http://www.tortall.net/projects/yasm/releases/yasm-1.3.0-win64.exe - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - curl -fsSL -o mingw-w64.7z http://downloads.sourceforge.net/project/mingw-w64-dgn/mingw-w64/mingw-w64-bin-x86_64-20151206.7z + - 7z x -oC:\ mingw-w64.7z > NUL + - set 
PATH=C:\mingw64\bin;%PATH% + - x86_64-w64-mingw32-gcc --version - go version - go env + - git clone https://github.com/minio/isa-l + - cd isa-l + - make -f Makefile.unx arch=mingw + - mv include isa-l + - set CGO_CPPFLAGS=-Ic:/gopath/src/github.com/minio/minio/isa-l + - set CGO_LDFLAGS=c:/gopath/src/github.com/minio/minio/isa-l/isa-l.a + - set CC=x86_64-w64-mingw32-gcc + - set CXX=x86_64-w64-mingw32-g++ + - cd %GOPATH%\src\github.com\minio\minio # to run your custom scripts instead of automatic MSBuild build_script: - - curl -o ui-assets.go -L https://dl.minio.io/assets/server/ui/%UI_ASSETS% - - curl -o ui-assets.asc -L https://dl.minio.io/assets/server/ui/%UI_ASSETS_ARMOR% + - curl -fsSL -o ui-assets.go https://dl.minio.io/assets/server/ui/%UI_ASSETS% + - curl -fsSL -o ui-assets.asc https://dl.minio.io/assets/server/ui/%UI_ASSETS_ARMOR% - gpg --batch --no-tty --yes --keyserver pgp.mit.edu --recv-keys F9AAC728 - gpg --batch --no-tty --verify %UI_ASSETS_ARMOR% %UI_ASSETS% + - go generate ./... - go test . - go test -race . - go test github.com/minio/minio/pkg... diff --git a/bucket-handlers.go b/bucket-handlers.go index 546cfefc6..ddebb22c0 100644 --- a/bucket-handlers.go +++ b/bucket-handlers.go @@ -22,9 +22,10 @@ import ( "net/http" "github.com/gorilla/mux" - "github.com/minio/minio-xl/pkg/crypto/sha256" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/crypto/sha256" "github.com/minio/minio/pkg/fs" + "github.com/minio/minio/pkg/probe" + v4 "github.com/minio/minio/pkg/signature" ) // GetBucketLocationHandler - GET Bucket location. @@ -204,7 +205,7 @@ func (api CloudStorageAPI) PutBucketHandler(w http.ResponseWriter, req *http.Req return } - var signature *fs.Signature + var signature *v4.Signature // Init signature V4 verification if isRequestSignatureV4(req) { var err *probe.Error @@ -340,7 +341,7 @@ func (api CloudStorageAPI) PostPolicyBucketHandler(w http.ResponseWriter, req *h writeErrorResponse(w, req, InvalidBucketName, req.URL.Path) case fs.BadDigest: writeErrorResponse(w, req, BadDigest, req.URL.Path) - case fs.SignatureDoesNotMatch: + case v4.SigDoesNotMatch: writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path) case fs.IncompleteBody: writeErrorResponse(w, req, IncompleteBody, req.URL.Path) diff --git a/config-logger-main.go b/config-logger-main.go index b35863d94..d413c48e8 100644 --- a/config-logger-main.go +++ b/config-logger-main.go @@ -20,7 +20,7 @@ import ( "runtime" "github.com/minio/cli" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/probe" ) // Configure logger diff --git a/damerau-levenshtein.go b/damerau-levenshtein.go new file mode 100644 index 000000000..a01361a5d --- /dev/null +++ b/damerau-levenshtein.go @@ -0,0 +1,66 @@ +/* + * Minio Client (C) 2014-2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "math" +) + +// Returns the minimum value of a slice of integers +func minimum(integers []int) (minVal int) { + minVal = math.MaxInt32 + for _, v := range integers { + if v < minVal { + minVal = v + } + } + return +} + +// DamerauLevenshteinDistance calculates distance between two strings using an algorithm +// described in https://en.wikipedia.org/wiki/Damerau-Levenshtein_distance +func DamerauLevenshteinDistance(a string, b string) int { + d := make([][]int, len(a)+1) + for i := 1; i <= len(a)+1; i++ { + d[i-1] = make([]int, len(b)+1) + } + for i := 0; i <= len(a); i++ { + d[i][0] = i + } + for j := 0; j <= len(b); j++ { + d[0][j] = j + } + for i := 1; i <= len(a); i++ { + for j := 1; j <= len(b); j++ { + cost := 0 + if a[i-1] == b[j-1] { + cost = 0 + } else { + cost = 1 + } + d[i][j] = minimum([]int{ + d[i-1][j] + 1, + d[i][j-1] + 1, + d[i-1][j-1] + cost, + }) + if i > 1 && j > 1 && a[i-1] == b[j-2] && a[i-2] == b[j-1] { + d[i][j] = minimum([]int{d[i][j], d[i-2][j-2] + cost}) // transposition + } + } + } + return d[len(a)][len(b)] +} diff --git a/httprange.go b/httprange.go index 7d6ddae94..c08e11136 100644 --- a/httprange.go +++ b/httprange.go @@ -22,8 +22,8 @@ import ( "strconv" "strings" - "github.com/minio/minio-xl/pkg/probe" "github.com/minio/minio/pkg/fs" + "github.com/minio/minio/pkg/probe" ) const ( diff --git a/jwt.go b/jwt.go index 158d49396..db6944994 100644 --- a/jwt.go +++ b/jwt.go @@ -22,7 +22,7 @@ import ( "time" jwtgo "github.com/dgrijalva/jwt-go" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/probe" "golang.org/x/crypto/bcrypt" ) diff --git a/logger-file-hook.go b/logger-file-hook.go index 11609c2e1..074868b23 100644 --- a/logger-file-hook.go +++ b/logger-file-hook.go @@ -21,7 +21,7 @@ import ( "os" "github.com/Sirupsen/logrus" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/probe" ) type localFile struct { diff --git a/logger-mongo-hook.go b/logger-mongo-hook.go index 8475bc578..6765f79a5 100644 --- a/logger-mongo-hook.go +++ b/logger-mongo-hook.go @@ -20,7 +20,7 @@ import ( "fmt" "github.com/Sirupsen/logrus" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/probe" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" ) diff --git a/logger-syslog-hook_nix.go b/logger-syslog-hook_nix.go index bbcce20a8..eb6b0df99 100644 --- a/logger-syslog-hook_nix.go +++ b/logger-syslog-hook_nix.go @@ -23,7 +23,7 @@ import ( "log/syslog" "github.com/Sirupsen/logrus" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/probe" ) // syslogHook to send logs via syslog. 
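A minimal companion-test sketch (not part of this patch; the inputs are illustrative) showing how the DamerauLevenshteinDistance helper added above behaves under the < 2 cutoff that findClosestCommands applies in main.go below:

package main

import "testing"

// The helper implements the optimal-string-alignment variant: a single
// transposition counts as one edit, unlike plain Levenshtein distance.
func TestDamerauLevenshteinDistance(t *testing.T) {
	cases := []struct {
		a, b string
		want int
	}{
		{"update", "update", 0}, // exact match
		{"udpate", "update", 1}, // transposed characters, still suggested
		{"updte", "update", 1},  // one missing character, still suggested
		{"srv", "server", 3},    // too distant, rejected by the < 2 check
	}
	for _, c := range cases {
		if got := DamerauLevenshteinDistance(c.a, c.b); got != c.want {
			t.Errorf("DamerauLevenshteinDistance(%q, %q) = %d, want %d", c.a, c.b, got, c.want)
		}
	}
}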
diff --git a/logger-syslog-hook_windows.go b/logger-syslog-hook_windows.go
index 949f2d04b..993d6dc41 100644
--- a/logger-syslog-hook_windows.go
+++ b/logger-syslog-hook_windows.go
@@ -18,7 +18,7 @@
 
 package main
 
-import "github.com/minio/minio-xl/pkg/probe"
+import "github.com/minio/minio/pkg/probe"
 
 func log2Syslog(network, raddr string) *probe.Error {
 	return probe.NewError(errSysLogNotSupported)
diff --git a/logger.go b/logger.go
index 2ab8b118a..117ed44ff 100644
--- a/logger.go
+++ b/logger.go
@@ -21,7 +21,7 @@ import (
 	"reflect"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/minio/minio-xl/pkg/probe"
+	"github.com/minio/minio/pkg/probe"
 )
 
 type fields map[string]interface{}
diff --git a/logger_test.go b/logger_test.go
index 2b8c7adab..09c654218 100644
--- a/logger_test.go
+++ b/logger_test.go
@@ -22,7 +22,7 @@ import (
 
 	"errors"
 
 	"github.com/Sirupsen/logrus"
-	"github.com/minio/minio-xl/pkg/probe"
+	"github.com/minio/minio/pkg/probe"
 	. "gopkg.in/check.v1"
 )
diff --git a/main.go b/main.go
index 80079c048..93bda667d 100644
--- a/main.go
+++ b/main.go
@@ -20,11 +20,12 @@ import (
 	"fmt"
 	"os"
 	"runtime"
+	"sort"
 	"strconv"
 
 	"github.com/dustin/go-humanize"
 	"github.com/minio/cli"
-	"github.com/minio/minio-xl/pkg/probe"
+	"github.com/minio/minio/pkg/probe"
 )
 
 // Help template for minio.
@@ -101,6 +102,19 @@ func findClosestCommands(command string) []string {
 	for _, value := range commandsTree.PrefixMatch(command) {
 		closestCommands = append(closestCommands, value.(string))
 	}
+	sort.Strings(closestCommands)
+	// Suggest other close commands - allow missed, wrongly added and
+	// even transposed characters
+	for _, value := range commandsTree.walk(commandsTree.root) {
+		if i := sort.SearchStrings(closestCommands, value.(string)); i < len(closestCommands) && closestCommands[i] == value.(string) {
+			continue
+		}
+		// The threshold of 2 is arbitrary; it allows at most one
+		// missed, wrongly added or transposed character
+		if DamerauLevenshteinDistance(command, value.(string)) < 2 {
+			closestCommands = append(closestCommands, value.(string))
+		}
+	}
 	return closestCommands
 }
diff --git a/notifier.go b/notifier.go
index 321adb1e9..cc2d20daa 100644
--- a/notifier.go
+++ b/notifier.go
@@ -23,7 +23,7 @@ import (
 
 	"strings"
 
 	"github.com/fatih/color"
-	"github.com/minio/minio-xl/pkg/probe"
+	"github.com/minio/minio/pkg/probe"
 	"github.com/olekukonko/ts"
 )
diff --git a/object-handlers.go b/object-handlers.go
index 8284ea32a..12271b4ab 100644
--- a/object-handlers.go
+++ b/object-handlers.go
@@ -22,8 +22,9 @@ import (
 	"strconv"
 
 	"github.com/gorilla/mux"
-	"github.com/minio/minio-xl/pkg/probe"
 	"github.com/minio/minio/pkg/fs"
+	"github.com/minio/minio/pkg/probe"
+	v4 "github.com/minio/minio/pkg/signature"
 )
 
 const (
@@ -172,7 +173,7 @@ func (api CloudStorageAPI) PutObjectHandler(w http.ResponseWriter, req *http.Req
 		return
 	}
 
-	var signature *fs.Signature
+	var signature *v4.Signature
 	if isRequestSignatureV4(req) {
 		// Init signature V4 verification
 		var err *probe.Error
@@ -209,7 +210,7 @@ func (api CloudStorageAPI) PutObjectHandler(w http.ResponseWriter, req *http.Req
 		writeErrorResponse(w, req, BadDigest, req.URL.Path)
 	case fs.MissingDateHeader:
 		writeErrorResponse(w, req, RequestTimeTooSkewed, req.URL.Path)
-	case fs.SignatureDoesNotMatch:
+	case v4.SigDoesNotMatch:
 		writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path)
 	case fs.IncompleteBody:
 		writeErrorResponse(w, req, IncompleteBody, req.URL.Path)
@@ -320,7 +321,7 @@ func (api CloudStorageAPI) PutObjectPartHandler(w http.ResponseWriter, req *http
 		}
 	}
 
-	var signature *fs.Signature
+	var signature *v4.Signature
 	if isRequestSignatureV4(req) {
 		//
Init signature V4 verification var err *probe.Error @@ -353,7 +354,7 @@ func (api CloudStorageAPI) PutObjectPartHandler(w http.ResponseWriter, req *http writeErrorResponse(w, req, NoSuchUpload, req.URL.Path) case fs.BadDigest: writeErrorResponse(w, req, BadDigest, req.URL.Path) - case fs.SignatureDoesNotMatch: + case v4.SigDoesNotMatch: writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path) case fs.IncompleteBody: writeErrorResponse(w, req, IncompleteBody, req.URL.Path) @@ -475,7 +476,7 @@ func (api CloudStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, } objectResourcesMetadata := getObjectResources(req.URL.Query()) - var signature *fs.Signature + var signature *v4.Signature if isRequestSignatureV4(req) { // Init signature V4 verification var err *probe.Error @@ -516,7 +517,7 @@ func (api CloudStorageAPI) CompleteMultipartUploadHandler(w http.ResponseWriter, writeErrorResponse(w, req, InvalidPart, req.URL.Path) case fs.InvalidPartOrder: writeErrorResponse(w, req, InvalidPartOrder, req.URL.Path) - case fs.SignatureDoesNotMatch: + case v4.SigDoesNotMatch: writeErrorResponse(w, req, SignatureDoesNotMatch, req.URL.Path) case fs.IncompleteBody: writeErrorResponse(w, req, IncompleteBody, req.URL.Path) diff --git a/vendor/github.com/minio/minio-xl/pkg/atomic/atomic.go b/pkg/atomic/atomic.go similarity index 98% rename from vendor/github.com/minio/minio-xl/pkg/atomic/atomic.go rename to pkg/atomic/atomic.go index 56c7543aa..2912a8cf1 100644 --- a/vendor/github.com/minio/minio-xl/pkg/atomic/atomic.go +++ b/pkg/atomic/atomic.go @@ -1,5 +1,5 @@ /* - * Minio Client (C) 2015 Minio, Inc. + * Minio Cloud Storage (C) 2015-2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/vendor/github.com/minio/minio-xl/pkg/atomic/atomic_test.go b/pkg/atomic/atomic_test.go similarity index 96% rename from vendor/github.com/minio/minio-xl/pkg/atomic/atomic_test.go rename to pkg/atomic/atomic_test.go index 1f026e2a4..76ee3484d 100644 --- a/vendor/github.com/minio/minio-xl/pkg/atomic/atomic_test.go +++ b/pkg/atomic/atomic_test.go @@ -34,7 +34,7 @@ type MySuite struct { var _ = Suite(&MySuite{}) func (s *MySuite) SetUpSuite(c *C) { - root, err := ioutil.TempDir("/tmp", "atomic-") + root, err := ioutil.TempDir(os.TempDir(), "atomic-") c.Assert(err, IsNil) s.root = root } diff --git a/vendor/github.com/minio/minio-xl/pkg/cpu/cpu_amd64.go b/pkg/cpu/cpu_amd64.go similarity index 100% rename from vendor/github.com/minio/minio-xl/pkg/cpu/cpu_amd64.go rename to pkg/cpu/cpu_amd64.go diff --git a/vendor/github.com/minio/minio-xl/pkg/cpu/cpu_amd64.s b/pkg/cpu/cpu_amd64.s similarity index 100% rename from vendor/github.com/minio/minio-xl/pkg/cpu/cpu_amd64.s rename to pkg/cpu/cpu_amd64.s diff --git a/vendor/github.com/minio/minio-xl/pkg/cpu/cpu_arm.go b/pkg/cpu/cpu_arm.go similarity index 100% rename from vendor/github.com/minio/minio-xl/pkg/cpu/cpu_arm.go rename to pkg/cpu/cpu_arm.go diff --git a/vendor/github.com/minio/minio-xl/pkg/cpu/cpu_test.go b/pkg/cpu/cpu_test.go similarity index 97% rename from vendor/github.com/minio/minio-xl/pkg/cpu/cpu_test.go rename to pkg/cpu/cpu_test.go index a39787f1e..8286413e9 100644 --- a/vendor/github.com/minio/minio-xl/pkg/cpu/cpu_test.go +++ b/pkg/cpu/cpu_test.go @@ -23,7 +23,7 @@ import ( "strings" "testing" - "github.com/minio/minio-xl/pkg/cpu" + "github.com/minio/minio/pkg/cpu" . 
"gopkg.in/check.v1" ) diff --git a/vendor/github.com/minio/minio-xl/pkg/cpu/doc.go b/pkg/cpu/doc.go similarity index 100% rename from vendor/github.com/minio/minio-xl/pkg/cpu/doc.go rename to pkg/cpu/doc.go diff --git a/pkg/crypto/sha1/.gitignore b/pkg/crypto/sha1/.gitignore new file mode 100644 index 000000000..a11ae5b8a --- /dev/null +++ b/pkg/crypto/sha1/.gitignore @@ -0,0 +1 @@ +*.syso \ No newline at end of file diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/LICENSE b/pkg/crypto/sha1/LICENSE similarity index 100% rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha256/LICENSE rename to pkg/crypto/sha1/LICENSE diff --git a/pkg/crypto/sha1/sha1.go b/pkg/crypto/sha1/sha1.go new file mode 100644 index 000000000..6cce320ab --- /dev/null +++ b/pkg/crypto/sha1/sha1.go @@ -0,0 +1,150 @@ +/* + * Minio Cloud Storage, (C) 2015-2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file of +// Golang project: +// https://github.com/golang/go/blob/master/LICENSE + +// Using this part of Minio codebase under the license +// Apache License Version 2.0 with modifications + +// Package sha1 implements the SHA1 hash algorithm as defined in RFC 3174. +package sha1 + +import "hash" + +// Size - The size of a SHA1 checksum in bytes. +const Size = 20 + +// BlockSize - The blocksize of SHA1 in bytes. +const BlockSize = 64 + +const ( + chunk = 64 + init0 = 0x67452301 + init1 = 0xEFCDAB89 + init2 = 0x98BADCFE + init3 = 0x10325476 + init4 = 0xC3D2E1F0 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + h [5]uint32 + x [chunk]byte + nx int + len uint64 +} + +// Reset digest +func (d *digest) Reset() { + d.h[0] = init0 + d.h[1] = init1 + d.h[2] = init2 + d.h[3] = init3 + d.h[4] = init4 + d.nx = 0 + d.len = 0 +} + +// New returns a new hash.Hash computing the SHA1 checksum. +func New() hash.Hash { + d := new(digest) + d.Reset() + return d +} + +// Return output size +func (d *digest) Size() int { return Size } + +// Return checksum blocksize +func (d *digest) BlockSize() int { return BlockSize } + +// Write to digest +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.len += uint64(nn) + if d.nx > 0 { + n := copy(d.x[d.nx:], p) + d.nx += n + if d.nx == chunk { + block(d, d.x[:]) + d.nx = 0 + } + p = p[n:] + } + if len(p) >= chunk { + n := len(p) &^ (chunk - 1) + block(d, p[:n]) + p = p[n:] + } + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +// Return checksum bytes +func (d *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d0 := *d + hash := d0.checkSum() + return append(in, hash[:]...) +} + +// Intermediate checksum function +func (d *digest) checkSum() [Size]byte { + len := d.len + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. 
+ var tmp [64]byte + tmp[0] = 0x80 + if len%64 < 56 { + d.Write(tmp[0 : 56-len%64]) + } else { + d.Write(tmp[0 : 64+56-len%64]) + } + + // Length in bits. + len <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(len >> (56 - 8*i)) + } + d.Write(tmp[0:8]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + var digest [Size]byte + for i, s := range d.h { + digest[i*4] = byte(s >> 24) + digest[i*4+1] = byte(s >> 16) + digest[i*4+2] = byte(s >> 8) + digest[i*4+3] = byte(s) + } + + return digest +} + +// Sum - single caller sha1 helper +func Sum(data []byte) [Size]byte { + var d digest + d.Reset() + d.Write(data) + return d.checkSum() +} diff --git a/pkg/crypto/sha1/sha1_linux.S b/pkg/crypto/sha1/sha1_linux.S new file mode 100644 index 000000000..49887325d --- /dev/null +++ b/pkg/crypto/sha1/sha1_linux.S @@ -0,0 +1,967 @@ +/* + * Implement fast SHA-1 with AVX2 instructions. (x86_64) + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * Contact Information: + * Ilya Albrekht + * Maxim Locktyukhin + * Ronen Zohar + * Chandramouli Narayanan + * + * BSD LICENSE + * + * Copyright(c) 2014 Intel Corporation. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +/* + * SHA-1 implementation with Intel(R) AVX2 instruction set extensions. 
+ * + *This implementation is based on the previous SSSE3 release: + * https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1 + * + *Updates 20-byte SHA-1 record in 'hash' for even number of + *'num_blocks' consecutive 64-byte blocks + * + */ + +/* + * Using this part of Minio codebase under the license + * Apache License Version 2.0 with modifications + * + */ + +#ifdef HAS_AVX2 +#ifndef ENTRY +#define ENTRY(name) \ + .globl name ; \ + .align 4,0x90 ; \ + name: +#endif + +#ifndef END +#define END(name) \ + .size name, .-name +#endif + +#ifndef ENDPROC +#define ENDPROC(name) \ + .type name, @function ; \ + END(name) +#endif + +#define NUM_INVALID 100 + +#define TYPE_R32 0 +#define TYPE_R64 1 +#define TYPE_XMM 2 +#define TYPE_INVALID 100 + + .macro R32_NUM opd r32 + \opd = NUM_INVALID + .ifc \r32,%eax + \opd = 0 + .endif + .ifc \r32,%ecx + \opd = 1 + .endif + .ifc \r32,%edx + \opd = 2 + .endif + .ifc \r32,%ebx + \opd = 3 + .endif + .ifc \r32,%esp + \opd = 4 + .endif + .ifc \r32,%ebp + \opd = 5 + .endif + .ifc \r32,%esi + \opd = 6 + .endif + .ifc \r32,%edi + \opd = 7 + .endif +#ifdef X86_64 + .ifc \r32,%r8d + \opd = 8 + .endif + .ifc \r32,%r9d + \opd = 9 + .endif + .ifc \r32,%r10d + \opd = 10 + .endif + .ifc \r32,%r11d + \opd = 11 + .endif + .ifc \r32,%r12d + \opd = 12 + .endif + .ifc \r32,%r13d + \opd = 13 + .endif + .ifc \r32,%r14d + \opd = 14 + .endif + .ifc \r32,%r15d + \opd = 15 + .endif +#endif + .endm + + .macro R64_NUM opd r64 + \opd = NUM_INVALID +#ifdef X86_64 + .ifc \r64,%rax + \opd = 0 + .endif + .ifc \r64,%rcx + \opd = 1 + .endif + .ifc \r64,%rdx + \opd = 2 + .endif + .ifc \r64,%rbx + \opd = 3 + .endif + .ifc \r64,%rsp + \opd = 4 + .endif + .ifc \r64,%rbp + \opd = 5 + .endif + .ifc \r64,%rsi + \opd = 6 + .endif + .ifc \r64,%rdi + \opd = 7 + .endif + .ifc \r64,%r8 + \opd = 8 + .endif + .ifc \r64,%r9 + \opd = 9 + .endif + .ifc \r64,%r10 + \opd = 10 + .endif + .ifc \r64,%r11 + \opd = 11 + .endif + .ifc \r64,%r12 + \opd = 12 + .endif + .ifc \r64,%r13 + \opd = 13 + .endif + .ifc \r64,%r14 + \opd = 14 + .endif + .ifc \r64,%r15 + \opd = 15 + .endif +#endif + .endm + + .macro XMM_NUM opd xmm + \opd = NUM_INVALID + .ifc \xmm,%xmm0 + \opd = 0 + .endif + .ifc \xmm,%xmm1 + \opd = 1 + .endif + .ifc \xmm,%xmm2 + \opd = 2 + .endif + .ifc \xmm,%xmm3 + \opd = 3 + .endif + .ifc \xmm,%xmm4 + \opd = 4 + .endif + .ifc \xmm,%xmm5 + \opd = 5 + .endif + .ifc \xmm,%xmm6 + \opd = 6 + .endif + .ifc \xmm,%xmm7 + \opd = 7 + .endif + .ifc \xmm,%xmm8 + \opd = 8 + .endif + .ifc \xmm,%xmm9 + \opd = 9 + .endif + .ifc \xmm,%xmm10 + \opd = 10 + .endif + .ifc \xmm,%xmm11 + \opd = 11 + .endif + .ifc \xmm,%xmm12 + \opd = 12 + .endif + .ifc \xmm,%xmm13 + \opd = 13 + .endif + .ifc \xmm,%xmm14 + \opd = 14 + .endif + .ifc \xmm,%xmm15 + \opd = 15 + .endif + .endm + + .macro TYPE type reg + R32_NUM reg_type_r32 \reg + R64_NUM reg_type_r64 \reg + XMM_NUM reg_type_xmm \reg + .if reg_type_r64 <> NUM_INVALID + \type = TYPE_R64 + .elseif reg_type_r32 <> NUM_INVALID + \type = TYPE_R32 + .elseif reg_type_xmm <> NUM_INVALID + \type = TYPE_XMM + .else + \type = TYPE_INVALID + .endif + .endm + + .macro PFX_OPD_SIZE + .byte 0x66 + .endm + + .macro PFX_REX opd1 opd2 W=0 + .if ((\opd1 | \opd2) & 8) || \W + .byte 0x40 | ((\opd1 & 8) >> 3) | ((\opd2 & 8) >> 1) | (\W << 3) + .endif + .endm + + .macro MODRM mod opd1 opd2 + .byte \mod | (\opd1 & 7) | ((\opd2 & 7) << 3) + .endm + + .macro PSHUFB_XMM xmm1 xmm2 + XMM_NUM pshufb_opd1 \xmm1 + XMM_NUM pshufb_opd2 \xmm2 + PFX_OPD_SIZE + PFX_REX 
pshufb_opd1 pshufb_opd2 + .byte 0x0f, 0x38, 0x00 + MODRM 0xc0 pshufb_opd1 pshufb_opd2 + .endm + + .macro PCLMULQDQ imm8 xmm1 xmm2 + XMM_NUM clmul_opd1 \xmm1 + XMM_NUM clmul_opd2 \xmm2 + PFX_OPD_SIZE + PFX_REX clmul_opd1 clmul_opd2 + .byte 0x0f, 0x3a, 0x44 + MODRM 0xc0 clmul_opd1 clmul_opd2 + .byte \imm8 + .endm + + .macro PEXTRD imm8 xmm gpr + R32_NUM extrd_opd1 \gpr + XMM_NUM extrd_opd2 \xmm + PFX_OPD_SIZE + PFX_REX extrd_opd1 extrd_opd2 + .byte 0x0f, 0x3a, 0x16 + MODRM 0xc0 extrd_opd1 extrd_opd2 + .byte \imm8 + .endm + + .macro MOVQ_R64_XMM opd1 opd2 + TYPE movq_r64_xmm_opd1_type \opd1 + .if movq_r64_xmm_opd1_type == TYPE_XMM + XMM_NUM movq_r64_xmm_opd1 \opd1 + R64_NUM movq_r64_xmm_opd2 \opd2 + .else + R64_NUM movq_r64_xmm_opd1 \opd1 + XMM_NUM movq_r64_xmm_opd2 \opd2 + .endif + PFX_OPD_SIZE + PFX_REX movq_r64_xmm_opd1 movq_r64_xmm_opd2 1 + .if movq_r64_xmm_opd1_type == TYPE_XMM + .byte 0x0f, 0x7e + .else + .byte 0x0f, 0x6e + .endif + MODRM 0xc0 movq_r64_xmm_opd1 movq_r64_xmm_opd2 + .endm + +#define CTX %rdi /* arg1 */ +#define BUF %rsi /* arg2 */ +#define CNT %rdx /* arg3 */ + +#define REG_A %ecx +#define REG_B %esi +#define REG_C %edi +#define REG_D %eax +#define REG_E %edx +#define REG_TB %ebx +#define REG_TA %r12d +#define REG_RA %rcx +#define REG_RB %rsi +#define REG_RC %rdi +#define REG_RD %rax +#define REG_RE %rdx +#define REG_RTA %r12 +#define REG_RTB %rbx +#define REG_T1 %ebp +#define xmm_mov vmovups +#define avx2_zeroupper vzeroupper +#define RND_F1 1 +#define RND_F2 2 +#define RND_F3 3 + +.macro REGALLOC + .set A, REG_A + .set B, REG_B + .set C, REG_C + .set D, REG_D + .set E, REG_E + .set TB, REG_TB + .set TA, REG_TA + + .set RA, REG_RA + .set RB, REG_RB + .set RC, REG_RC + .set RD, REG_RD + .set RE, REG_RE + + .set RTA, REG_RTA + .set RTB, REG_RTB + + .set T1, REG_T1 +.endm + +#define K_BASE %r8 +#define HASH_PTR %r9 +#define BUFFER_PTR %r10 +#define BUFFER_PTR2 %r13 +#define BUFFER_END %r11 + +#define PRECALC_BUF %r14 +#define WK_BUF %r15 + +#define W_TMP %xmm0 +#define WY_TMP %ymm0 +#define WY_TMP2 %ymm9 + +# AVX2 variables +#define WY0 %ymm3 +#define WY4 %ymm5 +#define WY08 %ymm7 +#define WY12 %ymm8 +#define WY16 %ymm12 +#define WY20 %ymm13 +#define WY24 %ymm14 +#define WY28 %ymm15 + +#define YMM_SHUFB_BSWAP %ymm10 + +/* + * Keep 2 iterations precalculated at a time: + * - 80 DWORDs per iteration * 2 + */ +#define W_SIZE (80*2*2 +16) + +#define WK(t) ((((t) % 80) / 4)*32 + ( (t) % 4)*4 + ((t)/80)*16 )(WK_BUF) +#define PRECALC_WK(t) ((t)*2*2)(PRECALC_BUF) + + +.macro UPDATE_HASH hash, val + add \hash, \val + mov \val, \hash +.endm + +.macro PRECALC_RESET_WY + .set WY_00, WY0 + .set WY_04, WY4 + .set WY_08, WY08 + .set WY_12, WY12 + .set WY_16, WY16 + .set WY_20, WY20 + .set WY_24, WY24 + .set WY_28, WY28 + .set WY_32, WY_00 +.endm + +.macro PRECALC_ROTATE_WY + /* Rotate macros */ + .set WY_32, WY_28 + .set WY_28, WY_24 + .set WY_24, WY_20 + .set WY_20, WY_16 + .set WY_16, WY_12 + .set WY_12, WY_08 + .set WY_08, WY_04 + .set WY_04, WY_00 + .set WY_00, WY_32 + + /* Define register aliases */ + .set WY, WY_00 + .set WY_minus_04, WY_04 + .set WY_minus_08, WY_08 + .set WY_minus_12, WY_12 + .set WY_minus_16, WY_16 + .set WY_minus_20, WY_20 + .set WY_minus_24, WY_24 + .set WY_minus_28, WY_28 + .set WY_minus_32, WY +.endm + +.macro PRECALC_00_15 + .if (i == 0) # Initialize and rotate registers + PRECALC_RESET_WY + PRECALC_ROTATE_WY + .endif + + /* message scheduling pre-compute for rounds 0-15 */ + .if ((i & 7) == 0) + /* + * blended AVX2 and ALU instruction scheduling + * 1 vector 
iteration per 8 rounds + */ + vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP + .elseif ((i & 7) == 1) + vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\ + WY_TMP, WY_TMP + .elseif ((i & 7) == 2) + vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY + .elseif ((i & 7) == 4) + vpaddd K_XMM(K_BASE), WY, WY_TMP + .elseif ((i & 7) == 7) + vmovdqu WY_TMP, PRECALC_WK(i&~7) + + PRECALC_ROTATE_WY + .endif +.endm + +.macro PRECALC_16_31 + /* + * message scheduling pre-compute for rounds 16-31 + * calculating last 32 w[i] values in 8 XMM registers + * pre-calculate K+w[i] values and store to mem + * for later load by ALU add instruction + * + * "brute force" vectorization for rounds 16-31 only + * due to w[i]->w[i-3] dependency + */ + .if ((i & 7) == 0) + /* + * blended AVX2 and ALU instruction scheduling + * 1 vector iteration per 8 rounds + */ + /* w[i-14] */ + vpalignr $8, WY_minus_16, WY_minus_12, WY + vpsrldq $4, WY_minus_04, WY_TMP /* w[i-3] */ + .elseif ((i & 7) == 1) + vpxor WY_minus_08, WY, WY + vpxor WY_minus_16, WY_TMP, WY_TMP + .elseif ((i & 7) == 2) + vpxor WY_TMP, WY, WY + vpslldq $12, WY, WY_TMP2 + .elseif ((i & 7) == 3) + vpslld $1, WY, WY_TMP + vpsrld $31, WY, WY + .elseif ((i & 7) == 4) + vpor WY, WY_TMP, WY_TMP + vpslld $2, WY_TMP2, WY + .elseif ((i & 7) == 5) + vpsrld $30, WY_TMP2, WY_TMP2 + vpxor WY, WY_TMP, WY_TMP + .elseif ((i & 7) == 7) + vpxor WY_TMP2, WY_TMP, WY + vpaddd K_XMM(K_BASE), WY, WY_TMP + vmovdqu WY_TMP, PRECALC_WK(i&~7) + + PRECALC_ROTATE_WY + .endif +.endm + +.macro PRECALC_32_79 + /* + * in SHA-1 specification: + * w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1 + * instead we do equal: + * w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2 + * allows more efficient vectorization + * since w[i]=>w[i-3] dependency is broken + */ + + .if ((i & 7) == 0) + /* + * blended AVX2 and ALU instruction scheduling + * 1 vector iteration per 8 rounds + */ + vpalignr $8, WY_minus_08, WY_minus_04, WY_TMP + .elseif ((i & 7) == 1) + /* W is W_minus_32 before xor */ + vpxor WY_minus_28, WY, WY + .elseif ((i & 7) == 2) + vpxor WY_minus_16, WY_TMP, WY_TMP + .elseif ((i & 7) == 3) + vpxor WY_TMP, WY, WY + .elseif ((i & 7) == 4) + vpslld $2, WY, WY_TMP + .elseif ((i & 7) == 5) + vpsrld $30, WY, WY + vpor WY, WY_TMP, WY + .elseif ((i & 7) == 7) + vpaddd K_XMM(K_BASE), WY, WY_TMP + vmovdqu WY_TMP, PRECALC_WK(i&~7) + + PRECALC_ROTATE_WY + .endif +.endm + +.macro PRECALC r, s + .set i, \r + + .if (i < 40) + .set K_XMM, 32*0 + .elseif (i < 80) + .set K_XMM, 32*1 + .elseif (i < 120) + .set K_XMM, 32*2 + .else + .set K_XMM, 32*3 + .endif + + .if (i<32) + PRECALC_00_15 \s + .elseif (i<64) + PRECALC_16_31 \s + .elseif (i < 160) + PRECALC_32_79 \s + .endif +.endm + +.macro ROTATE_STATE + .set T_REG, E + .set E, D + .set D, C + .set C, B + .set B, TB + .set TB, A + .set A, T_REG + + .set T_REG, RE + .set RE, RD + .set RD, RC + .set RC, RB + .set RB, RTB + .set RTB, RA + .set RA, T_REG +.endm + +/* Macro relies on saved ROUND_Fx */ + +.macro RND_FUN f, r + .if (\f == RND_F1) + ROUND_F1 \r + .elseif (\f == RND_F2) + ROUND_F2 \r + .elseif (\f == RND_F3) + ROUND_F3 \r + .endif +.endm + +.macro RR r + .set round_id, (\r % 80) + + .if (round_id == 0) /* Precalculate F for first round */ + .set ROUND_FUNC, RND_F1 + mov B, TB + + rorx $(32-30), B, B /* b>>>2 */ + andn D, TB, T1 + and C, TB + xor T1, TB + .endif + + RND_FUN ROUND_FUNC, \r + ROTATE_STATE + + .if (round_id == 18) + .set ROUND_FUNC, RND_F2 + .elseif (round_id == 38) + .set ROUND_FUNC, RND_F3 + .elseif (round_id == 58) + .set 
ROUND_FUNC, RND_F2 + .endif + + .set round_id, ( (\r+1) % 80) + + RND_FUN ROUND_FUNC, (\r+1) + ROTATE_STATE +.endm + +.macro ROUND_F1 r + add WK(\r), E + + andn C, A, T1 /* ~b&d */ + lea (RE,RTB), E /* Add F from the previous round */ + + rorx $(32-5), A, TA /* T2 = A >>> 5 */ + rorx $(32-30),A, TB /* b>>>2 for next round */ + + PRECALC (\r) /* msg scheduling for next 2 blocks */ + + /* + * Calculate F for the next round + * (b & c) ^ andn[b, d] + */ + and B, A /* b&c */ + xor T1, A /* F1 = (b&c) ^ (~b&d) */ + + lea (RE,RTA), E /* E += A >>> 5 */ +.endm + +.macro ROUND_F2 r + add WK(\r), E + lea (RE,RTB), E /* Add F from the previous round */ + + /* Calculate F for the next round */ + rorx $(32-5), A, TA /* T2 = A >>> 5 */ + .if ((round_id) < 79) + rorx $(32-30), A, TB /* b>>>2 for next round */ + .endif + PRECALC (\r) /* msg scheduling for next 2 blocks */ + + .if ((round_id) < 79) + xor B, A + .endif + + add TA, E /* E += A >>> 5 */ + + .if ((round_id) < 79) + xor C, A + .endif +.endm + +.macro ROUND_F3 r + add WK(\r), E + PRECALC (\r) /* msg scheduling for next 2 blocks */ + + lea (RE,RTB), E /* Add F from the previous round */ + + mov B, T1 + or A, T1 + + rorx $(32-5), A, TA /* T2 = A >>> 5 */ + rorx $(32-30), A, TB /* b>>>2 for next round */ + + /* Calculate F for the next round + * (b and c) or (d and (b or c)) + */ + and C, T1 + and B, A + or T1, A + + add TA, E /* E += A >>> 5 */ + +.endm + +/* + * macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining + */ +.macro SHA1_PIPELINED_MAIN_BODY + + REGALLOC + + mov (HASH_PTR), A + mov 4(HASH_PTR), B + mov 8(HASH_PTR), C + mov 12(HASH_PTR), D + mov 16(HASH_PTR), E + + mov %rsp, PRECALC_BUF + lea (2*4*80+32)(%rsp), WK_BUF + + # Precalc WK for first 2 blocks + PRECALC_OFFSET = 0 + .set i, 0 + .rept 160 + PRECALC i + .set i, i + 1 + .endr + PRECALC_OFFSET = 128 + xchg WK_BUF, PRECALC_BUF + + .align 32 +_loop: + /* + * code loops through more than one block + * we use K_BASE value as a signal of a last block, + * it is set below by: cmovae BUFFER_PTR, K_BASE + */ + cmp K_BASE, BUFFER_PTR + jne _begin + .align 32 + jmp _end + .align 32 +_begin: + + /* + * Do first block + * rounds: 0,2,4,6,8 + */ + .set j, 0 + .rept 5 + RR j + .set j, j+2 + .endr + + jmp _loop0 +_loop0: + + /* + * rounds: + * 10,12,14,16,18 + * 20,22,24,26,28 + * 30,32,34,36,38 + * 40,42,44,46,48 + * 50,52,54,56,58 + */ + .rept 25 + RR j + .set j, j+2 + .endr + + add $(2*64), BUFFER_PTR /* move to next odd-64-byte block */ + cmp BUFFER_END, BUFFER_PTR /* is current block the last one? */ + cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */ + + /* + * rounds + * 60,62,64,66,68 + * 70,72,74,76,78 + */ + .rept 10 + RR j + .set j, j+2 + .endr + + UPDATE_HASH (HASH_PTR), A + UPDATE_HASH 4(HASH_PTR), TB + UPDATE_HASH 8(HASH_PTR), C + UPDATE_HASH 12(HASH_PTR), D + UPDATE_HASH 16(HASH_PTR), E + + cmp K_BASE, BUFFER_PTR /* is current block the last one? 
*/ + je _loop + + mov TB, B + + /* Process second block */ + /* + * rounds + * 0+80, 2+80, 4+80, 6+80, 8+80 + * 10+80,12+80,14+80,16+80,18+80 + */ + + .set j, 0 + .rept 10 + RR j+80 + .set j, j+2 + .endr + + jmp _loop1 +_loop1: + /* + * rounds + * 20+80,22+80,24+80,26+80,28+80 + * 30+80,32+80,34+80,36+80,38+80 + */ + .rept 10 + RR j+80 + .set j, j+2 + .endr + + jmp _loop2 +_loop2: + + /* + * rounds + * 40+80,42+80,44+80,46+80,48+80 + * 50+80,52+80,54+80,56+80,58+80 + */ + .rept 10 + RR j+80 + .set j, j+2 + .endr + + add $(2*64), BUFFER_PTR2 /* move to next even-64-byte block */ + + cmp BUFFER_END, BUFFER_PTR2 /* is current block the last one */ + cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */ + + jmp _loop3 +_loop3: + + /* + * rounds + * 60+80,62+80,64+80,66+80,68+80 + * 70+80,72+80,74+80,76+80,78+80 + */ + .rept 10 + RR j+80 + .set j, j+2 + .endr + + UPDATE_HASH (HASH_PTR), A + UPDATE_HASH 4(HASH_PTR), TB + UPDATE_HASH 8(HASH_PTR), C + UPDATE_HASH 12(HASH_PTR), D + UPDATE_HASH 16(HASH_PTR), E + + /* Reset state for AVX2 reg permutation */ + mov A, TA + mov TB, A + mov C, TB + mov E, C + mov D, B + mov TA, D + + REGALLOC + + xchg WK_BUF, PRECALC_BUF + + jmp _loop + + .align 32 + _end: + +.endm + +.section .rodata + +#define K1 0x5a827999 +#define K2 0x6ed9eba1 +#define K3 0x8f1bbcdc +#define K4 0xca62c1d6 + +.align 128 +K_XMM_AR: + .long K1, K1, K1, K1 + .long K1, K1, K1, K1 + .long K2, K2, K2, K2 + .long K2, K2, K2, K2 + .long K3, K3, K3, K3 + .long K3, K3, K3, K3 + .long K4, K4, K4, K4 + .long K4, K4, K4, K4 + +BSWAP_SHUFB_CTL: + .long 0x00010203 + .long 0x04050607 + .long 0x08090a0b + .long 0x0c0d0e0f + .long 0x00010203 + .long 0x04050607 + .long 0x08090a0b + .long 0x0c0d0e0f + +# void sha1_transform(int32_t *hash, const char* input, size_t num_blocks) ; + .text + ENTRY(sha1_transform) + + push %rbx + push %rbp + push %r12 + push %r13 + push %r14 + push %r15 + + RESERVE_STACK = (W_SIZE*4 + 8+24) + + /* Align stack */ + mov %rsp, %rbx + and $~(0x20-1), %rsp + push %rbx + sub $RESERVE_STACK, %rsp + + avx2_zeroupper + + lea K_XMM_AR(%rip), K_BASE + + mov CTX, HASH_PTR + mov BUF, BUFFER_PTR + lea 64(BUF), BUFFER_PTR2 + + shl $6, CNT /* mul by 64 */ + add BUF, CNT + add $64, CNT + mov CNT, BUFFER_END + + cmp BUFFER_END, BUFFER_PTR2 + cmovae K_BASE, BUFFER_PTR2 + + xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP + + SHA1_PIPELINED_MAIN_BODY + + avx2_zeroupper + + add $RESERVE_STACK, %rsp + pop %rsp + + pop %r15 + pop %r14 + pop %r13 + pop %r12 + pop %rbp + pop %rbx + + ret + + ENDPROC(sha1_transform) +#endif diff --git a/pkg/crypto/sha1/sha1_sse3_amd64.asm b/pkg/crypto/sha1/sha1_sse3_amd64.asm new file mode 100644 index 000000000..e91967a6f --- /dev/null +++ b/pkg/crypto/sha1/sha1_sse3_amd64.asm @@ -0,0 +1,579 @@ +;--------------------- +; https://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1 +; +; License information: +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; This implementation notably advances the performance of SHA-1 algorithm compared to existing +; implementations. We are encouraging all projects utilizing SHA-1 to integrate this new fast +; implementation and are ready to help if issues or concerns arise (you are welcome to leave +; a comment or write an email to the authors). It is provided 'as is' and free for either +; commercial or non-commercial use. 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;
+; This code implements two interfaces of SHA-1 update function: 1) working on a single
+; 64-byte block and 2) working on a buffer of multiple 64-byte blocks. Multiple blocks
+; version of code is software pipelined and faster overall, it is a default. Assemble
+; with -DINTEL_SHA1_SINGLEBLOCK to select single 64-byte block function interface.
+;
+; C++ prototypes of implemented functions are below:
+;
+; #ifndef INTEL_SHA1_SINGLEBLOCK
+; // Updates 20-byte SHA-1 record in 'hash' for 'num_blocks' consecutive 64-byte blocks
+; extern "C" void sha1_update_intel(int *hash, const char* input, size_t num_blocks );
+; #else
+; // Updates 20-byte SHA-1 record in 'hash' for one 64-byte block pointed by 'input'
+; extern "C" void sha1_update_intel(int *hash, const char* input);
+; #endif
+;
+; Function name 'sha1_update_intel' can be changed in the source or via macro:
+; -DINTEL_SHA1_UPDATE_FUNCNAME=my_sha1_update_func_name
+;
+; It implements both UNIX (default) and Windows ABIs, use -DWIN_ABI on Windows
+;
+; Code checks CPU for SSSE3 support via CPUID feature flag (CPUID.1.ECX.SSSE3[bit 9]==1),
+; and performs dispatch. Since in most cases the functionality on non-SSSE3 supporting CPUs
+; is also required, the default (e.g. one being replaced) function can be provided for
+; dispatch on such CPUs, the name of old function can be changed in the source or via macro:
+; -DINTEL_SHA1_UPDATE_DEFAULT_DISPATCH=default_sha1_update_function_name
+;
+; Authors: Maxim Locktyukhin and Ronen Zohar at Intel.com
+;
+
+%ifndef INTEL_SHA1_UPDATE_DEFAULT_DISPATCH
+    ;; can be replaced with a default SHA-1 update function name
+%define INTEL_SHA1_UPDATE_DEFAULT_DISPATCH sha1_intel_non_ssse3_cpu_stub_
+%else
+extern INTEL_SHA1_UPDATE_DEFAULT_DISPATCH
+%endif
+
+;; provide alternative SHA-1 update function's name here
+%ifndef INTEL_SHA1_UPDATE_FUNCNAME
+%define INTEL_SHA1_UPDATE_FUNCNAME sha1_update_intel
+%endif
+
+global INTEL_SHA1_UPDATE_FUNCNAME
+
+
+%ifndef INTEL_SHA1_SINGLEBLOCK
+%assign multiblock 1
+%else
+%assign multiblock 0
+%endif
+
+
+bits 64
+default rel
+
+%ifdef WIN_ABI
+    %xdefine arg1 rcx
+    %xdefine arg2 rdx
+    %xdefine arg3 r8
+%else
+    %xdefine arg1 rdi
+    %xdefine arg2 rsi
+    %xdefine arg3 rdx
+%endif
+
+%xdefine ctx arg1
+%xdefine buf arg2
+%xdefine cnt arg3
+
+%macro REGALLOC 0
+    %xdefine A ecx
+    %xdefine B esi
+    %xdefine C edi
+    %xdefine D ebp
+    %xdefine E edx
+
+    %xdefine T1 eax
+    %xdefine T2 ebx
+%endmacro
+
+%xdefine K_BASE r8
+%xdefine HASH_PTR r9
+%xdefine BUFFER_PTR r10
+%xdefine BUFFER_END r11
+
+%xdefine W_TMP xmm0
+%xdefine W_TMP2 xmm9
+
+%xdefine W0 xmm1
+%xdefine W4 xmm2
+%xdefine W8 xmm3
+%xdefine W12 xmm4
+%xdefine W16 xmm5
+%xdefine W20 xmm6
+%xdefine W24 xmm7
+%xdefine W28 xmm8
+
+%xdefine XMM_SHUFB_BSWAP xmm10
+
+;; we keep window of 64 w[i]+K pre-calculated values in a circular buffer
+%xdefine WK(t) (rsp + (t & 15)*4)
+
+;------------------------------------------------------------------------------
+;
+; macro implements SHA-1 function's body for single or several 64-byte blocks
+; first param: function's name
+; second param: =0 - function implements single 64-byte block hash
+;               =1 - function implements multiple 64-byte blocks hash
+; 3rd function's argument is a number, greater than 0, of 64-byte blocks to calc hash for
+;
+%macro SHA1_VECTOR_ASM 2
+align 4096
+%1:
+    push rbx
+    push rbp
+
+    %ifdef WIN_ABI
+    push rdi
+    push rsi
+
+    %xdefine stack_size (16*4 + 16*5 + 8)
+    %else
+    %xdefine stack_size (16*4 + 8)
+    %endif
+
+    sub rsp, stack_size
+
+
%ifdef WIN_ABI + %xdefine xmm_save_base (rsp + 16*4) + + xmm_mov [xmm_save_base + 0*16], xmm6 + xmm_mov [xmm_save_base + 1*16], xmm7 + xmm_mov [xmm_save_base + 2*16], xmm8 + xmm_mov [xmm_save_base + 3*16], xmm9 + xmm_mov [xmm_save_base + 4*16], xmm10 + %endif + + mov HASH_PTR, ctx + mov BUFFER_PTR, buf + + %if (%2 == 1) + shl cnt, 6 ;; mul by 64 + add cnt, buf + mov BUFFER_END, cnt + %endif + + lea K_BASE, [K_XMM_AR] + xmm_mov XMM_SHUFB_BSWAP, [bswap_shufb_ctl] + + SHA1_PIPELINED_MAIN_BODY %2 + + %ifdef WIN_ABI + xmm_mov xmm6, [xmm_save_base + 0*16] + xmm_mov xmm7, [xmm_save_base + 1*16] + xmm_mov xmm8, [xmm_save_base + 2*16] + xmm_mov xmm9, [xmm_save_base + 3*16] + xmm_mov xmm10,[xmm_save_base + 4*16] + %endif + + add rsp, stack_size + + %ifdef WIN_ABI + pop rsi + pop rdi + %endif + + pop rbp + pop rbx + + ret +%endmacro + +;-------------------------------------------- +; macro implements 80 rounds of SHA-1, for one 64-byte block or multiple blocks with s/w pipelining +; macro param: =0 - process single 64-byte block +; =1 - multiple blocks +; +%macro SHA1_PIPELINED_MAIN_BODY 1 + + REGALLOC + + mov A, [HASH_PTR ] + mov B, [HASH_PTR+ 4] + mov C, [HASH_PTR+ 8] + mov D, [HASH_PTR+12] + + mov E, [HASH_PTR+16] + + %assign i 0 + %rep W_PRECALC_AHEAD + W_PRECALC i + %assign i i+1 + %endrep + + %xdefine F F1 + + %if (%1 == 1) ;; code loops through more than one block + %%_loop: + cmp BUFFER_PTR, K_BASE ;; we use K_BASE value as a signal of a last block, + jne %%_begin ;; it is set below by: cmovae BUFFER_PTR, K_BASE + jmp %%_end + + align 32 + %%_begin: + %endif + RR A,B,C,D,E,0 + RR D,E,A,B,C,2 + RR B,C,D,E,A,4 + RR E,A,B,C,D,6 + RR C,D,E,A,B,8 + + RR A,B,C,D,E,10 + RR D,E,A,B,C,12 + RR B,C,D,E,A,14 + RR E,A,B,C,D,16 + RR C,D,E,A,B,18 + + %xdefine F F2 + + RR A,B,C,D,E,20 + RR D,E,A,B,C,22 + RR B,C,D,E,A,24 + RR E,A,B,C,D,26 + RR C,D,E,A,B,28 + + RR A,B,C,D,E,30 + RR D,E,A,B,C,32 + RR B,C,D,E,A,34 + RR E,A,B,C,D,36 + RR C,D,E,A,B,38 + + %xdefine F F3 + + RR A,B,C,D,E,40 + RR D,E,A,B,C,42 + RR B,C,D,E,A,44 + RR E,A,B,C,D,46 + RR C,D,E,A,B,48 + + RR A,B,C,D,E,50 + RR D,E,A,B,C,52 + RR B,C,D,E,A,54 + RR E,A,B,C,D,56 + RR C,D,E,A,B,58 + + %xdefine F F4 + + %if (%1 == 1) ;; if code loops through more than one block + add BUFFER_PTR, 64 ;; move to next 64-byte block + cmp BUFFER_PTR, BUFFER_END ;; check if current block is the last one + cmovae BUFFER_PTR, K_BASE ;; smart way to signal the last iteration + %else + %xdefine W_NO_TAIL_PRECALC 1 ;; no software pipelining for single block interface + %endif + + RR A,B,C,D,E,60 + RR D,E,A,B,C,62 + RR B,C,D,E,A,64 + RR E,A,B,C,D,66 + RR C,D,E,A,B,68 + + RR A,B,C,D,E,70 + RR D,E,A,B,C,72 + RR B,C,D,E,A,74 + RR E,A,B,C,D,76 + RR C,D,E,A,B,78 + + UPDATE_HASH [HASH_PTR ],A + UPDATE_HASH [HASH_PTR+ 4],B + UPDATE_HASH [HASH_PTR+ 8],C + UPDATE_HASH [HASH_PTR+12],D + UPDATE_HASH [HASH_PTR+16],E + + %if (%1 == 1) + jmp %%_loop + + align 32 + %%_end: + %endif + + + %xdefine W_NO_TAIL_PRECALC 0 + %xdefine F %error + +%endmacro + + +%macro F1 3 + mov T1,%2 + xor T1,%3 + and T1,%1 + xor T1,%3 +%endmacro + +%macro F2 3 + mov T1,%3 + xor T1,%2 + xor T1,%1 +%endmacro + +%macro F3 3 + mov T1,%2 + mov T2,%1 + or T1,%1 + and T2,%2 + and T1,%3 + or T1,T2 +%endmacro + +%define F4 F2 + +%macro UPDATE_HASH 2 + add %2, %1 + mov %1, %2 +%endmacro + + +%macro W_PRECALC 1 + %xdefine i (%1) + + %if (i < 20) + %xdefine K_XMM 0 + %elif (i < 40) + %xdefine K_XMM 16 + %elif (i < 60) + %xdefine K_XMM 32 + %else + %xdefine K_XMM 48 + %endif + + %if (i<16 || (i>=80 && i<(80 + 
W_PRECALC_AHEAD))) + + %if (W_NO_TAIL_PRECALC == 0) + + %xdefine i ((%1) % 80) ;; pre-compute for the next iteration + + %if (i == 0) + W_PRECALC_RESET + %endif + + + W_PRECALC_00_15 + %endif + + %elif (i < 32) + W_PRECALC_16_31 + %elif (i < 80) ;; rounds 32-79 + W_PRECALC_32_79 + %endif +%endmacro + +%macro W_PRECALC_RESET 0 + %xdefine W W0 + %xdefine W_minus_04 W4 + %xdefine W_minus_08 W8 + %xdefine W_minus_12 W12 + %xdefine W_minus_16 W16 + %xdefine W_minus_20 W20 + %xdefine W_minus_24 W24 + %xdefine W_minus_28 W28 + %xdefine W_minus_32 W +%endmacro + +%macro W_PRECALC_ROTATE 0 + %xdefine W_minus_32 W_minus_28 + %xdefine W_minus_28 W_minus_24 + %xdefine W_minus_24 W_minus_20 + %xdefine W_minus_20 W_minus_16 + %xdefine W_minus_16 W_minus_12 + %xdefine W_minus_12 W_minus_08 + %xdefine W_minus_08 W_minus_04 + %xdefine W_minus_04 W + %xdefine W W_minus_32 +%endmacro + +%xdefine W_PRECALC_AHEAD 16 +%xdefine W_NO_TAIL_PRECALC 0 + + +%xdefine xmm_mov movdqa + +%macro W_PRECALC_00_15 0 + ;; message scheduling pre-compute for rounds 0-15 + %if ((i & 3) == 0) ;; blended SSE and ALU instruction scheduling, 1 vector iteration per 4 rounds + movdqu W_TMP, [BUFFER_PTR + (i * 4)] + %elif ((i & 3) == 1) + pshufb W_TMP, XMM_SHUFB_BSWAP + movdqa W, W_TMP + %elif ((i & 3) == 2) + paddd W_TMP, [K_BASE] + %elif ((i & 3) == 3) + movdqa [WK(i&~3)], W_TMP + + W_PRECALC_ROTATE + %endif +%endmacro + +%macro W_PRECALC_16_31 0 + ;; message scheduling pre-compute for rounds 16-31 + ;; calculating last 32 w[i] values in 8 XMM registers + ;; pre-calculate K+w[i] values and store to mem, for later load by ALU add instruction + ;; + ;; "brute force" vectorization for rounds 16-31 only due to w[i]->w[i-3] dependency + ;; + %if ((i & 3) == 0) ;; blended SSE and ALU instruction scheduling, 1 vector iteration per 4 rounds + movdqa W, W_minus_12 + palignr W, W_minus_16, 8 ;; w[i-14] + movdqa W_TMP, W_minus_04 + psrldq W_TMP, 4 ;; w[i-3] + pxor W, W_minus_08 + %elif ((i & 3) == 1) + pxor W_TMP, W_minus_16 + pxor W, W_TMP + movdqa W_TMP2, W + movdqa W_TMP, W + pslldq W_TMP2, 12 + %elif ((i & 3) == 2) + psrld W, 31 + pslld W_TMP, 1 + por W_TMP, W + movdqa W, W_TMP2 + psrld W_TMP2, 30 + pslld W, 2 + %elif ((i & 3) == 3) + pxor W_TMP, W + pxor W_TMP, W_TMP2 + movdqa W, W_TMP + paddd W_TMP, [K_BASE + K_XMM] + movdqa [WK(i&~3)],W_TMP + + W_PRECALC_ROTATE + %endif +%endmacro + +%macro W_PRECALC_32_79 0 + ;; in SHA-1 specification: w[i] = (w[i-3] ^ w[i-8] ^ w[i-14] ^ w[i-16]) rol 1 + ;; instead we do equal: w[i] = (w[i-6] ^ w[i-16] ^ w[i-28] ^ w[i-32]) rol 2 + ;; allows more efficient vectorization since w[i]=>w[i-3] dependency is broken + ;; + %if ((i & 3) == 0) ;; blended SSE and ALU instruction scheduling, 1 vector iteration per 4 rounds + movdqa W_TMP, W_minus_04 + pxor W, W_minus_28 ;; W is W_minus_32 before xor + palignr W_TMP, W_minus_08, 8 + %elif ((i & 3) == 1) + pxor W, W_minus_16 + pxor W, W_TMP + movdqa W_TMP, W + %elif ((i & 3) == 2) + psrld W, 30 + pslld W_TMP, 2 + por W_TMP, W + %elif ((i & 3) == 3) + movdqa W, W_TMP + paddd W_TMP, [K_BASE + K_XMM] + movdqa [WK(i&~3)],W_TMP + + W_PRECALC_ROTATE + %endif +%endmacro + +%macro RR 6 ;; RR does two rounds of SHA-1 back to back with W pre-calculation + + ;; TEMP = A + ;; A = F( i, B, C, D ) + E + ROTATE_LEFT( A, 5 ) + W[i] + K(i) + ;; C = ROTATE_LEFT( B, 30 ) + ;; D = C + ;; E = D + ;; B = TEMP + + W_PRECALC (%6 + W_PRECALC_AHEAD) + F %2, %3, %4 ;; F returns result in T1 + add %5, [WK(%6)] + rol %2, 30 + mov T2, %1 + add %4, [WK(%6 + 1)] + rol T2, 5 + add %5, T1 + + 
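+    ;; Editor's note (annotation): at this point the first round of the pair is
+    ;; nearly done - %5 holds E + w[i]+K + F(B,C,D) and T2 stages rol(A,5); the
+    ;; second round below finishes it, interleaving the next W pre-calculation
+    ;; so the vector scheduling work overlaps the scalar round computation.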
+    W_PRECALC (%6 + W_PRECALC_AHEAD + 1)
+    add T2, %5
+    mov %5, T2
+    rol T2, 5
+    add %4, T2
+    F %1, %2, %3    ;; F returns result in T1
+    add %4, T1
+    rol %1, 30
+
+;; write:  %1, %2
+;; rotate: %1<=%4, %2<=%5, %3<=%1, %4<=%2, %5<=%3
+%endmacro
+
+
+
+;;----------------------
+section .data align=128
+
+%xdefine K1 0x5a827999
+%xdefine K2 0x6ed9eba1
+%xdefine K3 0x8f1bbcdc
+%xdefine K4 0xca62c1d6
+
+align 128
+K_XMM_AR:
+    DD K1, K1, K1, K1
+    DD K2, K2, K2, K2
+    DD K3, K3, K3, K3
+    DD K4, K4, K4, K4
+
+align 16
+bswap_shufb_ctl:
+    DD 00010203h
+    DD 04050607h
+    DD 08090a0bh
+    DD 0c0d0e0fh
+
+;; dispatch pointer, points to the init routine for the first invocation
+sha1_update_intel_dispatched:
+    DQ sha1_update_intel_init_
+
+;;----------------------
+section .text align=4096
+
+SHA1_VECTOR_ASM sha1_update_intel_ssse3_, multiblock
+
+align 32
+sha1_update_intel_init_:    ;; we get here on the first invocation
+    call sha1_update_intel_dispatch_init_
+INTEL_SHA1_UPDATE_FUNCNAME: ;; we get here after init
+    jmp qword [sha1_update_intel_dispatched]
+
+;; CPUID feature flag based dispatch
+sha1_update_intel_dispatch_init_:
+    push rax
+    push rbx
+    push rcx
+    push rdx
+    push rsi
+
+    lea rsi, [INTEL_SHA1_UPDATE_DEFAULT_DISPATCH]
+
+    mov eax, 1
+    cpuid
+
+    test ecx, 0200h ;; SSSE3 support, CPUID.1.ECX[bit 9]
+    jz _done
+
+    lea rsi, [sha1_update_intel_ssse3_]
+
+_done:
+    mov [sha1_update_intel_dispatched], rsi
+
+    pop rsi
+    pop rdx
+    pop rcx
+    pop rbx
+    pop rax
+    ret
+
+;;----------------------
+;; in case a default SHA-1 update function was not provided and the code is
+;; invoked on a CPU without SSSE3 support, dispatch handles the failure in the
+;; safest way - it jumps to the stub function below, whose UD2 instruction traps
+sha1_intel_non_ssse3_cpu_stub_:
+    ud2 ;; with no default SHA-1 provided, CPUs without SSSE3 safely fail here
+    ret
+
+; END
+;----------------------
diff --git a/pkg/crypto/sha1/sha1_test.go b/pkg/crypto/sha1/sha1_test.go
new file mode 100644
index 000000000..eecdfaf62
--- /dev/null
+++ b/pkg/crypto/sha1/sha1_test.go
@@ -0,0 +1,154 @@
+/*
+ * Minio Cloud Storage, (C) 2015-2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file of
+// Golang project:
+// https://github.com/golang/go/blob/master/LICENSE
+
+// Using this part of Minio codebase under the license
+// Apache License Version 2.0 with modifications
+
+// SHA1 hash algorithm. See RFC 3174.
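+//
+// Editor's note - a minimal usage sketch (an annotation; it relies only on the
+// API exercised by the tests below, return types are not shown in this hunk):
+//
+//	sum := Sum(data)     // one-shot helper; hex-encode via fmt.Sprintf("%x", sum)
+//	h := New()           // streaming hash.Hash
+//	io.WriteString(h, "hello world")
+//	digest := h.Sum(nil) // Sum appends; h stays usable for further writes
+//	h.Reset()            // reuse the digest for the next input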
+ +package sha1 + +import ( + "crypto/rand" + "fmt" + "io" + "testing" +) + +type sha1Test struct { + out string + in string +} + +var golden = []sha1Test{ + {"da39a3ee5e6b4b0d3255bfef95601890afd80709", ""}, + {"86f7e437faa5a7fce15d1ddcb9eaeaea377667b8", "a"}, + {"da23614e02469a0d7c7bd1bdab5c9c474b1904dc", "ab"}, + {"a9993e364706816aba3e25717850c26c9cd0d89d", "abc"}, + {"81fe8bfe87576c3ecb22426f8e57847382917acf", "abcd"}, + {"03de6c570bfe24bfc328ccd7ca46b76eadaf4334", "abcde"}, + {"1f8ac10f23c5b5bc1167bda84b833e5c057a77d2", "abcdef"}, + {"2fb5e13419fc89246865e7a324f476ec624e8740", "abcdefg"}, + {"425af12a0743502b322e93a015bcf868e324d56a", "abcdefgh"}, + {"c63b19f1e4c8b5f76b25c49b8b87f57d8e4872a1", "abcdefghi"}, + {"d68c19a0a345b7eab78d5e11e991c026ec60db63", "abcdefghij"}, + {"ebf81ddcbe5bf13aaabdc4d65354fdf2044f38a7", "Discard medicine more than two years old."}, + {"e5dea09392dd886ca63531aaa00571dc07554bb6", "He who has a shady past knows that nice guys finish last."}, + {"45988f7234467b94e3e9494434c96ee3609d8f8f", "I wouldn't marry him with a ten foot pole."}, + {"55dee037eb7460d5a692d1ce11330b260e40c988", "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, + {"b7bc5fb91080c7de6b582ea281f8a396d7c0aee8", "The days of the digital watch are numbered. -Tom Stoppard"}, + {"c3aed9358f7c77f523afe86135f06b95b3999797", "Nepal premier won't resign."}, + {"6e29d302bf6e3a5e4305ff318d983197d6906bb9", "For every action there is an equal and opposite government program."}, + {"597f6a540010f94c15d71806a99a2c8710e747bd", "His money is twice tainted: 'taint yours and 'taint mine."}, + {"6859733b2590a8a091cecf50086febc5ceef1e80", "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, + {"514b2630ec089b8aee18795fc0cf1f4860cdacad", "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, + {"c5ca0d4a7b6676fc7aa72caa41cc3d5df567ed69", "size: a.out: bad magic"}, + {"74c51fa9a04eadc8c1bbeaa7fc442f834b90a00a", "The major problem is with sendmail. -Mark Horton"}, + {"0b4c4ce5f52c3ad2821852a8dc00217fa18b8b66", "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, + {"3ae7937dd790315beb0f48330e8642237c61550a", "If the enemy is within range, then so are you."}, + {"410a2b296df92b9a47412b13281df8f830a9f44b", "It's well we cannot hear the screams/That we create in others' dreams."}, + {"841e7c85ca1adcddbdd0187f1289acb5c642f7f5", "You remind me of a TV show, but that's all right: I watch it anyway."}, + {"163173b825d03b952601376b25212df66763e1db", "C is as portable as Stonehedge!!"}, + {"32b0377f2687eb88e22106f133c586ab314d5279", "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"}, + {"0885aaf99b569542fd165fa44e322718f4a984e0", "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"}, + {"6627d6904d71420b0bf3886ab629623538689f45", "How can you write a big system without C++? 
-Paul Glick"}, +} + +func TestGolden(t *testing.T) { + for i := 0; i < len(golden); i++ { + g := golden[i] + s := fmt.Sprintf("%x", Sum([]byte(g.in))) + if s != g.out { + t.Fatalf("Sum function: sha1(%s) = %s want %s", g.in, s, g.out) + } + c := New() + for j := 0; j < 3; j++ { + if j < 2 { + io.WriteString(c, g.in) + } else { + io.WriteString(c, g.in[0:len(g.in)/2]) + c.Sum(nil) + io.WriteString(c, g.in[len(g.in)/2:]) + } + s := fmt.Sprintf("%x", c.Sum(nil)) + if s != g.out { + t.Fatalf("sha1[%d](%s) = %s want %s", j, g.in, s, g.out) + } + c.Reset() + } + } +} + +func TestSize(t *testing.T) { + c := New() + if got := c.Size(); got != Size { + t.Errorf("Size = %d; want %d", got, Size) + } +} + +func TestBlockSize(t *testing.T) { + c := New() + if got := c.BlockSize(); got != BlockSize { + t.Errorf("BlockSize = %d; want %d", got, BlockSize) + } +} + +// Tests that blockGeneric (pure Go) and block (in assembly for amd64, 386, arm) match. +func TestBlockGeneric(t *testing.T) { + gen, asm := New().(*digest), New().(*digest) + buf := make([]byte, BlockSize*20) // arbitrary factor + rand.Read(buf) + blockGeneric(gen, buf) + block(asm, buf) + if *gen != *asm { + t.Error("block and blockGeneric resulted in different states") + } +} + +var bench = New() +var buf = make([]byte, 1024*1024) + +func benchmarkSize(b *testing.B, size int) { + b.SetBytes(int64(size)) + sum := make([]byte, bench.Size()) + for i := 0; i < b.N; i++ { + bench.Reset() + bench.Write(buf[:size]) + bench.Sum(sum[:0]) + } +} + +func BenchmarkHash8Bytes(b *testing.B) { + benchmarkSize(b, 8) +} + +func BenchmarkHash1K(b *testing.B) { + benchmarkSize(b, 1024) +} + +func BenchmarkHash8K(b *testing.B) { + benchmarkSize(b, 8192) +} + +func BenchmarkHash1M(b *testing.B) { + benchmarkSize(b, 1024*1024) +} diff --git a/pkg/crypto/sha1/sha1_yasm_darwin.go b/pkg/crypto/sha1/sha1_yasm_darwin.go new file mode 100644 index 000000000..b66017204 --- /dev/null +++ b/pkg/crypto/sha1/sha1_yasm_darwin.go @@ -0,0 +1,21 @@ +// +build darwin,amd64 + +/* + * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package sha1 + +//go:generate yasm -f macho64 -DINTEL_SHA1_UPDATE_FUNCNAME=_sha1_update_intel sha1_sse3_amd64.asm -o sha1_sse3_amd64.syso diff --git a/pkg/crypto/sha1/sha1_yasm_linux.go b/pkg/crypto/sha1/sha1_yasm_linux.go new file mode 100644 index 000000000..4db62a71b --- /dev/null +++ b/pkg/crypto/sha1/sha1_yasm_linux.go @@ -0,0 +1,21 @@ +// +build linux,amd64 + +/* + * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sha1
+
+//go:generate yasm -f elf64 sha1_sse3_amd64.asm -o sha1_sse3_amd64.syso
diff --git a/pkg/crypto/sha1/sha1_yasm_windows.go b/pkg/crypto/sha1/sha1_yasm_windows.go
new file mode 100644
index 000000000..cbe0cac2d
--- /dev/null
+++ b/pkg/crypto/sha1/sha1_yasm_windows.go
@@ -0,0 +1,21 @@
+// +build windows,amd64
+
+/*
+ * Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sha1
+
+//go:generate yasm -f win64 -DWIN_ABI=1 sha1_sse3_amd64.asm -o sha1_sse3_amd64.syso
diff --git a/pkg/crypto/sha1/sha1block.go b/pkg/crypto/sha1/sha1block.go
new file mode 100644
index 000000000..5232281d2
--- /dev/null
+++ b/pkg/crypto/sha1/sha1block.go
@@ -0,0 +1,43 @@
+// +build amd64,cgo
+// +build darwin windows
+
+/*
+ * Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sha1
+
+// #include <stdint.h>
+// #include <stddef.h>
+// void sha1_update_intel(int32_t *hash, const char* input, size_t num_blocks);
+import "C"
+import (
+	"unsafe"
+
+	"github.com/minio/minio/pkg/cpu"
+)
+
+func block(dig *digest, p []byte) {
+	switch {
+	case cpu.HasSSE41():
+		blockSSE3(dig, p)
+	default:
+		blockGeneric(dig, p)
+	}
+}
+
+func blockSSE3(dig *digest, p []byte) {
+	C.sha1_update_intel((*C.int32_t)(unsafe.Pointer(&dig.h[0])), (*C.char)(unsafe.Pointer(&p[0])), (C.size_t)(len(p)/chunk))
+}
diff --git a/pkg/crypto/sha1/sha1block_generic.go b/pkg/crypto/sha1/sha1block_generic.go
new file mode 100644
index 000000000..4d4ae7a25
--- /dev/null
+++ b/pkg/crypto/sha1/sha1block_generic.go
@@ -0,0 +1,110 @@
+/*
+ * Minio Cloud Storage, (C) 2015-2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file of +// Golang project: +// https://github.com/golang/go/blob/master/LICENSE + +// Using this part of Minio codebase under the license +// Apache License Version 2.0 with modifications + +package sha1 + +const ( + _K0 = 0x5A827999 + _K1 = 0x6ED9EBA1 + _K2 = 0x8F1BBCDC + _K3 = 0xCA62C1D6 +) + +// blockGeneric is a portable, pure Go version of the SHA1 block step. +// It's used by sha1block_generic.go and tests. +func blockGeneric(dig *digest, p []byte) { + var w [16]uint32 + + h0, h1, h2, h3, h4 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4] + for len(p) >= chunk { + // Can interlace the computation of w with the + // rounds below if needed for speed. + for i := 0; i < 16; i++ { + j := i * 4 + w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3]) + } + + a, b, c, d, e := h0, h1, h2, h3, h4 + + // Each of the four 20-iteration rounds + // differs only in the computation of f and + // the choice of K (_K0, _K1, etc). + i := 0 + for ; i < 16; i++ { + f := b&c | (^b)&d + a5 := a<<5 | a>>(32-5) + b30 := b<<30 | b>>(32-30) + t := a5 + f + e + w[i&0xf] + _K0 + a, b, c, d, e = t, a, b30, c, d + } + for ; i < 20; i++ { + tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf] + w[i&0xf] = tmp<<1 | tmp>>(32-1) + + f := b&c | (^b)&d + a5 := a<<5 | a>>(32-5) + b30 := b<<30 | b>>(32-30) + t := a5 + f + e + w[i&0xf] + _K0 + a, b, c, d, e = t, a, b30, c, d + } + for ; i < 40; i++ { + tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf] + w[i&0xf] = tmp<<1 | tmp>>(32-1) + f := b ^ c ^ d + a5 := a<<5 | a>>(32-5) + b30 := b<<30 | b>>(32-30) + t := a5 + f + e + w[i&0xf] + _K1 + a, b, c, d, e = t, a, b30, c, d + } + for ; i < 60; i++ { + tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf] + w[i&0xf] = tmp<<1 | tmp>>(32-1) + f := ((b | c) & d) | (b & c) + + a5 := a<<5 | a>>(32-5) + b30 := b<<30 | b>>(32-30) + t := a5 + f + e + w[i&0xf] + _K2 + a, b, c, d, e = t, a, b30, c, d + } + for ; i < 80; i++ { + tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf] + w[i&0xf] = tmp<<1 | tmp>>(32-1) + f := b ^ c ^ d + a5 := a<<5 | a>>(32-5) + b30 := b<<30 | b>>(32-30) + t := a5 + f + e + w[i&0xf] + _K3 + a, b, c, d, e = t, a, b30, c, d + } + + h0 += a + h1 += b + h2 += c + h3 += d + h4 += e + + p = p[chunk:] + } + dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4] = h0, h1, h2, h3, h4 +} diff --git a/pkg/crypto/sha1/sha1block_linux.go b/pkg/crypto/sha1/sha1block_linux.go new file mode 100644 index 000000000..d370d89c3 --- /dev/null +++ b/pkg/crypto/sha1/sha1block_linux.go @@ -0,0 +1,50 @@ +// +build linux,amd64,cgo + +/* + * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package sha1
+
+// #cgo CFLAGS: -DHAS_AVX2
+// #include <stdint.h>
+// #include <stddef.h>
+// void sha1_transform(int32_t *hash, const char* input, size_t num_blocks);
+// void sha1_update_intel(int32_t *hash, const char* input, size_t num_blocks);
+import "C"
+import (
+	"unsafe"
+
+	"github.com/minio/minio/pkg/cpu"
+)
+
+func block(dig *digest, p []byte) {
+	switch {
+	case cpu.HasAVX2():
+		blockAVX2(dig, p)
+	case cpu.HasSSE41():
+		blockSSE3(dig, p)
+	default:
+		blockGeneric(dig, p)
+	}
+}
+
+func blockAVX2(dig *digest, p []byte) {
+	C.sha1_transform((*C.int32_t)(unsafe.Pointer(&dig.h[0])), (*C.char)(unsafe.Pointer(&p[0])), (C.size_t)(len(p)/chunk))
+}
+
+func blockSSE3(dig *digest, p []byte) {
+	C.sha1_update_intel((*C.int32_t)(unsafe.Pointer(&dig.h[0])), (*C.char)(unsafe.Pointer(&p[0])), (C.size_t)(len(p)/chunk))
+}
diff --git a/pkg/crypto/sha1/sha1block_nocgo.go b/pkg/crypto/sha1/sha1block_nocgo.go
new file mode 100644
index 000000000..00401607a
--- /dev/null
+++ b/pkg/crypto/sha1/sha1block_nocgo.go
@@ -0,0 +1,23 @@
+// +build !cgo arm
+
+/*
+ * Minio Cloud Storage, (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package sha1
+
+func block(dig *digest, p []byte) {
+	blockGeneric(dig, p)
+}
diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/LICENSE b/pkg/crypto/sha256/LICENSE
similarity index 100%
rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha512/LICENSE
rename to pkg/crypto/sha256/LICENSE
diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256-avx-asm_linux_amd64.S b/pkg/crypto/sha256/sha256-avx-asm_linux_amd64.S
similarity index 100%
rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256-avx-asm_linux_amd64.S
rename to pkg/crypto/sha256/sha256-avx-asm_linux_amd64.S
diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256-avx2-asm_linux_amd64.S b/pkg/crypto/sha256/sha256-avx2-asm_linux_amd64.S
similarity index 100%
rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256-avx2-asm_linux_amd64.S
rename to pkg/crypto/sha256/sha256-avx2-asm_linux_amd64.S
diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256-ssse3-asm_linux_amd64.S b/pkg/crypto/sha256/sha256-ssse3-asm_linux_amd64.S
similarity index 100%
rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256-ssse3-asm_linux_amd64.S
rename to pkg/crypto/sha256/sha256-ssse3-asm_linux_amd64.S
diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_linux.go b/pkg/crypto/sha256/sha256.go
similarity index 58%
rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_linux.go
rename to pkg/crypto/sha256/sha256.go
index 685fc9164..b69aa3ac5 100644
--- a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_linux.go
+++ b/pkg/crypto/sha256/sha256.go
@@ -1,7 +1,7 @@
-// +build 386 arm amd64,!cgo
+// +build darwin windows 386 arm !cgo
 
 /*
- * Minio Cloud Storage, (C) 2015 Minio, Inc.
+ * Minio Cloud Storage, (C) 2014-2016 Minio, Inc.
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,36 +20,22 @@ package sha256 import ( "hash" - "io" "crypto/sha256" ) -// Sum256 - single caller sha256 helper -func Sum256(data []byte) []byte { - d := sha256.New() - d.Write(data) - return d.Sum(nil) -} +// Size - The size of a SHA256 checksum in bytes. +const Size = 32 -// Sum - io.Reader based streaming sha256 helper -func Sum(reader io.Reader) ([]byte, error) { - d := sha256.New() - var err error - for err == nil { - length := 0 - byteBuffer := make([]byte, 1024*1024) - length, err = reader.Read(byteBuffer) - byteBuffer = byteBuffer[0:length] - d.Write(byteBuffer) - } - if err != io.EOF { - return nil, err - } - return d.Sum(nil), nil -} +// BlockSize - The blocksize of SHA256 in bytes. +const BlockSize = 64 // New returns a new hash.Hash computing SHA256. func New() hash.Hash { return sha256.New() } + +// Sum256 - single caller sha256 helper +func Sum256(data []byte) [Size]byte { + return sha256.Sum256(data) +} diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_linux_amd64.go b/pkg/crypto/sha256/sha256_linux.go similarity index 80% rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_linux_amd64.go rename to pkg/crypto/sha256/sha256_linux.go index c6243ff95..c38defe74 100644 --- a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_linux_amd64.go +++ b/pkg/crypto/sha256/sha256_linux.go @@ -1,4 +1,20 @@ -// +build amd64,cgo +// +build linux,amd64,cgo + +/* + * Minio Cloud Storage, (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style @@ -14,9 +30,8 @@ package sha256 import ( "hash" - "io" - "github.com/minio/minio-xl/pkg/cpu" + "github.com/minio/minio/pkg/cpu" ) // Size - The size of a SHA256 checksum in bytes. 
@@ -79,6 +94,14 @@ func New() hash.Hash { return d } +// Sum256 - single caller sha256 helper +func Sum256(data []byte) [Size]byte { + var d digest + d.Reset() + d.Write(data) + return d.checkSum() +} + // Return size of checksum func (d *digest) Size() int { return Size } @@ -152,30 +175,3 @@ func (d *digest) checkSum() [Size]byte { return digest } - -/// Convenience functions - -// Sum256 - single caller sha256 helper -func Sum256(data []byte) []byte { - var d digest - d.Reset() - d.Write(data) - return d.Sum(nil) -} - -// Sum - io.Reader based streaming sha256 helper -func Sum(reader io.Reader) ([]byte, error) { - h := New() - var err error - for err == nil { - length := 0 - byteBuffer := make([]byte, 1024*1024) - length, err = reader.Read(byteBuffer) - byteBuffer = byteBuffer[0:length] - h.Write(byteBuffer) - } - if err != io.EOF { - return nil, err - } - return h.Sum(nil), nil -} diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_linux_amd64_test.go b/pkg/crypto/sha256/sha256_test.go similarity index 89% rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_linux_amd64_test.go rename to pkg/crypto/sha256/sha256_test.go index 3ac7ffb2e..3caa2b758 100644 --- a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_linux_amd64_test.go +++ b/pkg/crypto/sha256/sha256_test.go @@ -1,4 +1,18 @@ -// +build amd64,cgo +/* + * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256block_linux_amd64.go b/pkg/crypto/sha256/sha256block.go similarity index 99% rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256block_linux_amd64.go rename to pkg/crypto/sha256/sha256block.go index 1474589d4..22aace8a5 100644 --- a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256block_linux_amd64.go +++ b/pkg/crypto/sha256/sha256block.go @@ -1,4 +1,4 @@ -// +build amd64,cgo +// +build linux,amd64,cgo // // Minio Cloud Storage, (C) 2015 Minio, Inc. 
diff --git a/vendor/github.com/minio/minio-xl/pkg/minhttp/LICENSE.Minio b/pkg/crypto/sha512/LICENSE similarity index 100% rename from vendor/github.com/minio/minio-xl/pkg/minhttp/LICENSE.Minio rename to pkg/crypto/sha512/LICENSE diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512-avx-asm_linux_amd64.S b/pkg/crypto/sha512/sha512-avx-asm_linux_amd64.S similarity index 100% rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512-avx-asm_linux_amd64.S rename to pkg/crypto/sha512/sha512-avx-asm_linux_amd64.S diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512-avx2-asm_linux_amd64.S b/pkg/crypto/sha512/sha512-avx2-asm_linux_amd64.S similarity index 100% rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512-avx2-asm_linux_amd64.S rename to pkg/crypto/sha512/sha512-avx2-asm_linux_amd64.S diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512-ssse3-asm_linux_amd64.S b/pkg/crypto/sha512/sha512-ssse3-asm_linux_amd64.S similarity index 100% rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512-ssse3-asm_linux_amd64.S rename to pkg/crypto/sha512/sha512-ssse3-asm_linux_amd64.S diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_linux.go b/pkg/crypto/sha512/sha512.go similarity index 55% rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_linux.go rename to pkg/crypto/sha512/sha512.go index a770813b9..57677a907 100644 --- a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_linux.go +++ b/pkg/crypto/sha512/sha512.go @@ -1,7 +1,7 @@ -// +build 386 arm amd64,!cgo +// +build darwin windows 386 arm !cgo /* - * Minio Cloud Storage, (C) 2014 Minio, Inc. + * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,41 +20,22 @@ package sha512 import ( "hash" - "io" "crypto/sha512" ) -// The size of a SHA512 checksum in bytes. -const ( - Size = sha512.Size -) +// Size - The size of a SHA512 checksum in bytes. +const Size = 64 -// Sum512 - single caller sha512 helper -func Sum512(data []byte) []byte { - d := sha512.New() - d.Write(data) - return d.Sum(nil) -} - -// Sum - io.Reader based streaming sha512 helper -func Sum(reader io.Reader) ([]byte, error) { - d := sha512.New() - var err error - for err == nil { - length := 0 - byteBuffer := make([]byte, 1024*1024) - length, err = reader.Read(byteBuffer) - byteBuffer = byteBuffer[0:length] - d.Write(byteBuffer) - } - if err != io.EOF { - return nil, err - } - return d.Sum(nil), nil -} +// BlockSize - The blocksize of SHA512 in bytes. +const BlockSize = 128 // New returns a new hash.Hash computing SHA512. func New() hash.Hash { return sha512.New() } + +// Sum512 - single caller sha512 helper +func Sum512(data []byte) [Size]byte { + return sha512.Sum512(data) +} diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_linux_amd64.go b/pkg/crypto/sha512/sha512_linux.go similarity index 86% rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_linux_amd64.go rename to pkg/crypto/sha512/sha512_linux.go index 696a7d538..6fdda8d63 100644 --- a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_linux_amd64.go +++ b/pkg/crypto/sha512/sha512_linux.go @@ -1,4 +1,4 @@ -// +build amd64,cgo +// +build linux,amd64,cgo // Copyright 2009 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style @@ -15,9 +15,8 @@ package sha512 import ( "hash" - "io" - "github.com/minio/minio-xl/pkg/cpu" + "github.com/minio/minio/pkg/cpu" ) // Size - The size of a SHA512 checksum in bytes. @@ -80,6 +79,14 @@ func New() hash.Hash { return d } +// Sum512 - single caller sha512 helper +func Sum512(data []byte) [Size]byte { + var d digest + d.Reset() + d.Write(data) + return d.checkSum() +} + // Return output array byte size func (d *digest) Size() int { return Size } @@ -157,30 +164,3 @@ func (d *digest) checkSum() [Size]byte { return digest } - -/// Convenience functions - -// Sum512 - single caller sha512 helper -func Sum512(data []byte) []byte { - var d digest - d.Reset() - d.Write(data) - return d.Sum(nil) -} - -// Sum - io.Reader based streaming sha512 helper -func Sum(reader io.Reader) ([]byte, error) { - h := New() - var err error - for err == nil { - length := 0 - byteBuffer := make([]byte, 1024*1024) - length, err = reader.Read(byteBuffer) - byteBuffer = byteBuffer[0:length] - h.Write(byteBuffer) - } - if err != io.EOF { - return nil, err - } - return h.Sum(nil), nil -} diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_linux_amd64_test.go b/pkg/crypto/sha512/sha512_test.go similarity index 92% rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_linux_amd64_test.go rename to pkg/crypto/sha512/sha512_test.go index 6184c5b55..3e1db6ebc 100644 --- a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_linux_amd64_test.go +++ b/pkg/crypto/sha512/sha512_test.go @@ -1,4 +1,18 @@ -// +build amd64,cgo +/* + * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512block_linux_amd64.go b/pkg/crypto/sha512/sha512block.go similarity index 87% rename from vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512block_linux_amd64.go rename to pkg/crypto/sha512/sha512block.go index 80faf37bd..f7af98e01 100644 --- a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512block_linux_amd64.go +++ b/pkg/crypto/sha512/sha512block.go @@ -1,20 +1,21 @@ -// +build amd64,cgo +// +build linux,amd64,cgo + +/* + * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ -// -// Minio Cloud Storage, (C) 2015 Minio, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// // Software block transform are provided by The Go Authors: // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style diff --git a/pkg/erasure/.gitignore b/pkg/erasure/.gitignore new file mode 100644 index 000000000..a11ae5b8a --- /dev/null +++ b/pkg/erasure/.gitignore @@ -0,0 +1 @@ +*.syso \ No newline at end of file diff --git a/pkg/erasure/INSTALLGO.md b/pkg/erasure/INSTALLGO.md new file mode 100644 index 000000000..d8319326f --- /dev/null +++ b/pkg/erasure/INSTALLGO.md @@ -0,0 +1,64 @@ +## Ubuntu (Kylin) 14.04 +### Build Dependencies +This installation document assumes Ubuntu 14.04+ on x86-64 platform. + +##### Install Git, GCC, yasm +```sh +$ sudo apt-get install git build-essential yasm +``` + +##### Install Go 1.5.1+ + +Download Go 1.5.1+ from [https://golang.org/dl/](https://golang.org/dl/). + +```sh +$ wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz +$ mkdir -p ${HOME}/bin/ +$ mkdir -p ${HOME}/go/ +$ tar -C ${HOME}/bin/ -xzf go1.5.1.linux-amd64.tar.gz +``` +##### Setup GOROOT and GOPATH + +Add the following exports to your ``~/.bashrc``. Environment variable GOROOT specifies the location of your golang binaries +and GOPATH specifies the location of your project workspace. + +```sh +$ export GOROOT=${HOME}/bin/go +$ export GOPATH=${HOME}/go +$ export PATH=$PATH:${HOME}/bin/go/bin:${GOPATH}/bin +``` + +## OS X (Yosemite) 10.10 +### Build Dependencies +This installation document assumes OS X Yosemite 10.10+ on x86-64 platform. + +##### Install brew +```sh +$ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" +``` + +##### Install Git, Python +```sh +$ brew install git python yasm +``` + +##### Install Go 1.5.1+ + +Install golang binaries using `brew` + +```sh +$ brew install go +$ mkdir -p $HOME/go +``` + +##### Setup GOROOT and GOPATH + +Add the following exports to your ``~/.bashrc``. Environment variable GOROOT specifies the location of your golang binaries +and GOPATH specifies the location of your project workspace. + +```sh +$ export GOPATH=${HOME}/go +$ export GOVERSION=$(brew list go | head -n 1 | cut -d '/' -f 6) +$ export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec +$ export PATH=$PATH:${GOPATH}/bin +``` diff --git a/pkg/erasure/LICENSE.INTEL b/pkg/erasure/LICENSE.INTEL new file mode 100644 index 000000000..667056695 --- /dev/null +++ b/pkg/erasure/LICENSE.INTEL @@ -0,0 +1,26 @@ + Copyright(c) 2011-2014 Intel Corporation All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/erasure/LICENSE.MINIO b/pkg/erasure/LICENSE.MINIO new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/pkg/erasure/LICENSE.MINIO @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
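[Editor's note] The erasure package introduced below (README.md, doc.go and the C
files that follow) documents a three-step encode/decode flow. A rough, hedged
sketch of that flow only: the call shapes mirror the package documentation below,
`erasure.Cauchy` is an assumed name for the exported technique constant (it is
not shown in this patch), and error handling is abbreviated.

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio/pkg/erasure"
)

func roundTrip(data []byte) []byte {
	// 1. Validate a set of encoder parameters: k=10 data blocks, m=5 parity
	//    blocks, Cauchy coefficients (the technique doc.go recommends).
	//    erasure.Cauchy is an assumed constant name.
	params, err := erasure.ValidateParams(10, 5, erasure.Cauchy)
	if err != nil {
		log.Fatal(err)
	}

	// 2. Create a new encoder from the validated parameters.
	encoder := erasure.NewErasure(params)

	// 3. Encode: yields (k + m) chunks plus the original object length.
	chunks, length := encoder.Encode(data)

	// Decoding reverses the steps; in principle any k of the (k + m)
	// chunks suffice to reconstruct the original data.
	original, err := encoder.Decode(chunks, length)
	if err != nil {
		log.Fatal(err)
	}
	return original
}

func main() {
	fmt.Printf("round-tripped %d bytes\n", len(roundTrip([]byte("hello, erasure"))))
}
```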
diff --git a/pkg/erasure/README.md b/pkg/erasure/README.md
new file mode 100644
index 000000000..4cef6311b
--- /dev/null
+++ b/pkg/erasure/README.md
@@ -0,0 +1,25 @@
+## Introduction
+
+Erasure is an open source Golang library written on top of ISA-L (the Intel Intelligent Storage Acceleration Library), released under [Apache license v2](./LICENSE)
+
+### Developers
+* [Get Source](./CONTRIBUTING.md)
+* [Build Dependencies](./BUILDDEPS.md)
+* [Development Workflow](./CONTRIBUTING.md#developer-guidelines)
+* [Developer discussions and bugs](https://github.com/minio/minio/issues)
+
+### Supported platforms
+
+| Name | Supported |
+| ------------- | ------------- |
+| Linux | Yes |
+| Windows | Not yet |
+| Mac OSX | Yes |
+
+### Supported architectures
+
+| Arch | Supported |
+| ------------- | ------------- |
+| x86-64 | Yes |
+| arm64 | Not yet |
+| i386 | Never |
diff --git a/pkg/erasure/ctypes.go b/pkg/erasure/ctypes.go
new file mode 100644
index 000000000..60ef6482b
--- /dev/null
+++ b/pkg/erasure/ctypes.go
@@ -0,0 +1,62 @@
+/*
+ * Minio Cloud Storage, (C) 2014 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package erasure
+
+// #include <stdint.h>
+import "C"
+import (
+	"fmt"
+	"unsafe"
+)
+
+// intSlice2CIntArray converts a Go int slice to a C int32_t array
+func intSlice2CIntArray(srcErrList []int) *C.int32_t {
+	if len(srcErrList) == 0 {
+		return (*C.int32_t)(unsafe.Pointer(nil))
+	}
+	var sizeErrInt = int(unsafe.Sizeof(srcErrList[0]))
+	switch sizeInt {
+	case sizeErrInt:
+		return (*C.int32_t)(unsafe.Pointer(&srcErrList[0]))
+	case sizeInt8:
+		int8Array := make([]int8, len(srcErrList))
+		for i, v := range srcErrList {
+			int8Array[i] = int8(v)
+		}
+		return (*C.int32_t)(unsafe.Pointer(&int8Array[0]))
+	case sizeInt16:
+		int16Array := make([]int16, len(srcErrList))
+		for i, v := range srcErrList {
+			int16Array[i] = int16(v)
+		}
+		return (*C.int32_t)(unsafe.Pointer(&int16Array[0]))
+	case sizeInt32:
+		int32Array := make([]int32, len(srcErrList))
+		for i, v := range srcErrList {
+			int32Array[i] = int32(v)
+		}
+		return (*C.int32_t)(unsafe.Pointer(&int32Array[0]))
+	case sizeInt64:
+		int64Array := make([]int64, len(srcErrList))
+		for i, v := range srcErrList {
+			int64Array[i] = int64(v)
+		}
+		return (*C.int32_t)(unsafe.Pointer(&int64Array[0]))
+	default:
+		panic(fmt.Sprintf("Unsupported: %d", sizeInt))
+	}
+}
diff --git a/pkg/erasure/doc.go b/pkg/erasure/doc.go
new file mode 100644
index 000000000..4e7edbc63
--- /dev/null
+++ b/pkg/erasure/doc.go
@@ -0,0 +1,66 @@
+// Package erasure is a Go wrapper for the Intel Intelligent Storage
+// Acceleration Library (Intel ISA-L). Intel ISA-L is a CPU-optimized
+// implementation of erasure coding algorithms.
+//
+// For more information on Intel ISA-L, please visit:
+// https://01.org/intel%C2%AE-storage-acceleration-library-open-source-version
+//
+// Usage:
+//
+// Encode encodes a block of data. The input is the original data. The output
+// is a 2-tuple containing (k + m) chunks of erasure encoded data and the
+// length of the original object.
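+//
+// (Editor's note, annotation) For example, with k = 10 and m = 5 a 1 MiB object
+// is split into 10 data chunks of roughly 1 MiB / 10 each, from which 5 parity
+// chunks are computed; any 10 of the resulting 15 chunks are then sufficient to
+// reconstruct the object.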
+//
+// Decode decodes the (k + m) encoded blocks back into their original form.
+// The original data length must also be provided as input.
+//
+// Decoded data is identical in length and content to the original data.
+//
+// Encoding data may be performed in 3 steps.
+//
+//  1. Create a validated set of encoder parameters
+//  2. Create a new encoder
+//  3. Encode data
+//
+// Decoding data is also performed in 3 steps.
+//
+//  1. Create a validated set of encoder parameters
+//  2. Create a new encoder
+//  3. Decode data
+//
+// Erasure parameters contain two configurable elements:
+//  ValidateParams(k, m uint8) (*Params, error)
+//  k - Number of data blocks
+//  m - Number of parity blocks
+//  constraints: k + m < Galois Field (2^8)
+//
+// Choosing the right parity is left to the application to decide.
+// The matrix technique is chosen internally: Vandermonde for small k,
+// Cauchy (recommended) otherwise.
+//
+// A few points to keep in mind:
+//
+// Matrix Type:
+//  - Vandermonde is the most commonly used method for choosing coefficients in
+//    erasure encoding, but it does not guarantee that every sub-matrix is invertible.
+//  - Cauchy is the recommended method for choosing coefficients in erasure
+//    coding, since any sub-matrix of a Cauchy matrix is invertible.
+//
+// Total blocks:
+//  - The sum of data and parity blocks must not exceed the Galois Field size (2^8).
+//
+// Example
+//
+// Creating and using an encoder
+//  var data []byte
+//  params, err := erasure.ValidateParams(10, 5)
+//  encoder := erasure.NewErasure(params)
+//  encodedBlocks, err := encoder.Encode(data)
+//
+// Creating and using a decoder
+//  var encodedBlocks [][]byte
+//  var length int
+//  params, err := erasure.ValidateParams(10, 5)
+//  encoder := erasure.NewErasure(params)
+//  originalData, err := encoder.Decode(encodedBlocks, length)
+//
+package erasure diff --git a/pkg/erasure/ec_minio_common.h b/pkg/erasure/ec_minio_common.h new file mode 100644 index 000000000..ee7965c22 --- /dev/null +++ b/pkg/erasure/ec_minio_common.h @@ -0,0 +1,39 @@
+/*
+ * Minio Cloud Storage, (C) 2014 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
+#include <stdint.h>
+
+int32_t minio_init_encoder (int k, int m,
+                            unsigned char **encode_matrix,
+                            unsigned char **encode_tbls);
+
+int32_t minio_init_decoder (int32_t *error_index,
+                            int k, int n, int errs,
+                            unsigned char *encoding_matrix,
+                            unsigned char **decode_matrix,
+                            unsigned char **decode_tbls,
+                            uint32_t **decode_index);
+
+int32_t minio_get_source_target (int errs, int k, int m,
+                                 int32_t *error_index,
+                                 uint32_t *decode_index,
+                                 unsigned char **buffs,
+                                 unsigned char ***source,
+                                 unsigned char ***target);
+#endif /* __COMMON_H__ */ diff --git a/pkg/erasure/ec_minio_decode.c b/pkg/erasure/ec_minio_decode.c new file mode 100644 index 000000000..782a05b4f --- /dev/null +++ b/pkg/erasure/ec_minio_decode.c @@ -0,0 +1,142 @@
+/*
+ * Minio Cloud Storage, (C) 2014 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "isa-l.h"
+#include "ec_minio_common.h"
+
+static
+int32_t _minio_src_index_in_error (int r, int32_t *error_index, int errs)
+{
+        int i;
+        for (i = 0; i < errs; i++) {
+                if (error_index[i] == r) {
+                        // true
+                        return 1;
+                }
+        }
+        // false
+        return 0;
+}
+
+// Separate out source data and target buffers
+int32_t minio_get_source_target (int errs, int k, int m,
+                                 int32_t *error_index,
+                                 uint32_t *decode_index,
+                                 unsigned char **buffs,
+                                 unsigned char ***source,
+                                 unsigned char ***target)
+{
+        int i;
+        unsigned char *tmp_source[k];
+        unsigned char *tmp_target[m];
+
+        if (k < 0 || m < 0) {
+                return -1;
+        }
+
+        memset (tmp_source, 0, k * sizeof (unsigned char *));
+        memset (tmp_target, 0, m * sizeof (unsigned char *));
+
+        for (i = 0; i < k; i++) {
+                tmp_source[i] = (unsigned char *) buffs[decode_index[i]];
+        }
+
+        for (i = 0; i < m; i++) {
+                if (i < errs)
+                        tmp_target[i] = (unsigned char *) buffs[error_index[i]];
+        }
+
+        *source = tmp_source;
+        *target = tmp_target;
+
+        return 0;
+}
+
+/*
+  Generate decode matrix during the decoding phase
+*/
+
+int32_t minio_init_decoder (int32_t *error_index,
+                            int k, int n, int errs,
+                            unsigned char *encode_matrix,
+                            unsigned char **decode_matrix,
+                            unsigned char **decode_tbls,
+                            uint32_t **decode_index)
+{
+        int i, j, r, l;
+
+        uint32_t *tmp_decode_index = (uint32_t *) malloc(sizeof(uint32_t) * k);
+        unsigned char *input_matrix;
+        unsigned char *inverse_matrix;
+        unsigned char *tmp_decode_matrix;
+        unsigned char *tmp_decode_tbls;
+
+        input_matrix = (unsigned char *) malloc(sizeof(unsigned char) * k * n);
+        inverse_matrix = (unsigned char *) malloc(sizeof(unsigned char) * k * n);
+        tmp_decode_matrix = (unsigned char *) malloc(sizeof(unsigned char) * k * n);
+        tmp_decode_tbls = (unsigned char *) malloc(sizeof(unsigned char) * k * n * 32);
+
+        for (i = 0, r = 0; i < k; i++, r++) {
+                while (_minio_src_index_in_error(r, error_index, errs))
+                        r++;
+                for (j = 0; j < k; j++) {
+                        input_matrix[k * i + j] = encode_matrix[k * r + j];
+                }
+                tmp_decode_index[i] = r;
+        }
+
+        // Not all Vandermonde matrices can be inverted
+        if (gf_invert_matrix(input_matrix, inverse_matrix, k) < 0) {
                free(input_matrix);
+                free(inverse_matrix);
+                free(tmp_decode_matrix);
+                free(tmp_decode_tbls);
+                free(tmp_decode_index);
+                return -1;
+        }
+
+        for (l = 0; l < errs; l++) {
+                if (error_index[l] < k) {
+                        // decoding matrix elements for data chunks
+                        for (j = 0; j < k; j++) {
+                                tmp_decode_matrix[k * l + j] =
+                                        inverse_matrix[k *
+                                                       error_index[l] + j];
+                        }
+                } else {
+                        // decoding matrix element for coding chunks
+                        for (i = 0; i < k; i++) {
+                                unsigned char s = 0;
+                                for (j = 0; j < k; j++) {
+                                        s ^= gf_mul(inverse_matrix[j * k + i],
+                                                    encode_matrix[k *
+                                                                  error_index[l] + j]);
+                                }
+                                tmp_decode_matrix[k * l + i] = s;
+                        }
+                }
+        }
+
+        ec_init_tables (k, errs, tmp_decode_matrix, tmp_decode_tbls);
+
+        // Free the scratch matrices; only the decode outputs are returned.
+        free(input_matrix);
+        free(inverse_matrix);
+
+        *decode_matrix = tmp_decode_matrix;
+        *decode_tbls = tmp_decode_tbls;
+        *decode_index = tmp_decode_index;
+
+        return 0;
+} diff --git a/pkg/erasure/ec_minio_encode.c b/pkg/erasure/ec_minio_encode.c new file mode 100644 index 000000000..d30895c2f --- /dev/null +++ b/pkg/erasure/ec_minio_encode.c @@ -0,0 +1,55 @@
+/*
+ * Minio Cloud Storage, (C) 2014 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "isa-l.h"
+#include "ec_minio_common.h"
+
+/*
+  Generate encode matrix during the encoding phase
+*/
+
+int32_t minio_init_encoder (int k, int m, unsigned char **encode_matrix, unsigned char **encode_tbls)
+{
+        unsigned char *tmp_matrix;
+        unsigned char *tmp_tbls;
+
+        tmp_matrix = (unsigned char *) malloc (k * (k + m));
+        tmp_tbls = (unsigned char *) malloc (k * (k + m) * 32);
+
+        if (k < 5) {
+                /*
+                  Commonly used method for choosing coefficients in erasure
+                  encoding but does not guarantee invertibility for every sub
+                  matrix. For large k it is possible to find cases where the
+                  decode matrix chosen from sources and parity not in erasure
+                  are not invertible. Users may want to adjust for k > 5.
+                  -- Intel
+                */
+                gf_gen_rs_matrix (tmp_matrix, k + m, k);
+        } else {
+                gf_gen_cauchy1_matrix (tmp_matrix, k + m, k);
+        }
+
+        ec_init_tables(k, m, &tmp_matrix[k * k], tmp_tbls);
+
+        *encode_matrix = tmp_matrix;
+        *encode_tbls = tmp_tbls;
+
+        return 0;
+} diff --git a/pkg/erasure/erasure_decode.go b/pkg/erasure/erasure_decode.go new file mode 100644 index 000000000..a08195a4c --- /dev/null +++ b/pkg/erasure/erasure_decode.go @@ -0,0 +1,123 @@
+/*
+ * Minio Cloud Storage, (C) 2014 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package erasure
+
+// #cgo CFLAGS: -O0
+// #include <stdlib.h>
+// #include "isa-l.h"
+// #include "ec_minio_common.h"
+import "C"
+import (
+	"errors"
+	"fmt"
+	"unsafe"
+)
+
+// Decode decodes erasure coded blocks of data into their original
+// form. Erasure coded data contains k data blocks and m parity
+// blocks. Decode can withstand the loss of up to any m blocks.
+//
+// "encodedDataBlocks" is an array of k data blocks and m parity
+// blocks. Data blocks are position and order dependent. Missing blocks
+// are set to "nil". At least k of the (k + m) data|parity blocks must
+// be intact.
+//
+// "dataLen" is the length of the original source data
+func (e *Erasure) Decode(encodedDataBlocks [][]byte, dataLen int) (decodedData []byte, err error) {
+	e.mutex.Lock()
+	defer e.mutex.Unlock()
+
+	var source, target **C.uchar
+
+	k := int(e.params.K)
+	m := int(e.params.M)
+	n := k + m
+	// We need the data and parity blocks preserved in the same order. Missing blocks are set to nil.
+	if len(encodedDataBlocks) != n {
+		msg := fmt.Sprintf("Encoded data blocks slice must be of length [%d]", n)
+		return nil, errors.New(msg)
+	}
+
+	// Length of a single encoded block
+	encodedBlockLen := GetEncodedBlockLen(dataLen, uint8(k))
+
+	// Keep track of errors per block.
+	missingEncodedBlocks := make([]int, n+1)
+	var missingEncodedBlocksCount int
+
+	// Check for the missing encoded blocks
+	for i := range encodedDataBlocks {
+		if encodedDataBlocks[i] == nil || len(encodedDataBlocks[i]) == 0 {
+			missingEncodedBlocks[missingEncodedBlocksCount] = i
+			missingEncodedBlocksCount++
+		}
+	}
+
+	// Cannot reconstruct original data when more than m blocks are missing.
+	if missingEncodedBlocksCount > m {
+		return nil, fmt.Errorf("Cannot reconstruct original data. Need at least [%d] intact data or parity blocks", k)
+	}
+
+	// Convert from Go int slice to C int array
+	missingEncodedBlocksC := intSlice2CIntArray(missingEncodedBlocks[:missingEncodedBlocksCount])
+
+	// Allocate buffer for the missing blocks
+	for i := range encodedDataBlocks {
+		if encodedDataBlocks[i] == nil || len(encodedDataBlocks[i]) == 0 {
+			encodedDataBlocks[i] = make([]byte, encodedBlockLen)
+		}
+	}
+
+	// If not already initialized, compute and cache
+	if e.decodeMatrix == nil || e.decodeTbls == nil || e.decodeIndex == nil {
+		var decodeMatrix, decodeTbls *C.uchar
+		var decodeIndex *C.uint32_t
+
+		C.minio_init_decoder(missingEncodedBlocksC, C.int(k), C.int(n), C.int(missingEncodedBlocksCount),
+			e.encodeMatrix, &decodeMatrix, &decodeTbls, &decodeIndex)
+
+		// cache this for future needs
+		e.decodeMatrix = decodeMatrix
+		e.decodeTbls = decodeTbls
+		e.decodeIndex = decodeIndex
+	}
+
+	// Make a slice of pointers to encoded blocks. Necessary to bridge to the C world.
+	pointers := make([]*byte, n)
+	for i := range encodedDataBlocks {
+		pointers[i] = &encodedDataBlocks[i][0]
+	}
+
+	// Get pointers to source "data" and target "parity" blocks from the output byte array.
+	ret := C.minio_get_source_target(C.int(missingEncodedBlocksCount), C.int(k), C.int(m), missingEncodedBlocksC,
+		e.decodeIndex, (**C.uchar)(unsafe.Pointer(&pointers[0])), &source, &target)
+	if int(ret) == -1 {
+		return nil, errors.New("Unable to decode data")
+	}
+
+	// Decode data
+	C.ec_encode_data(C.int(encodedBlockLen), C.int(k), C.int(missingEncodedBlocksCount), e.decodeTbls,
+		source, target)
+
+	// Allocate the output buffer and gather the decoded data blocks into it
+	decodedData = make([]byte, 0, encodedBlockLen*int(k))
+	for i := 0; i < int(k); i++ {
+		decodedData = append(decodedData, encodedDataBlocks[i]...)
+	}
+
+	return decodedData[:dataLen], nil
+} diff --git a/pkg/erasure/erasure_encode.go b/pkg/erasure/erasure_encode.go new file mode 100644 index 000000000..eee3090c7 --- /dev/null +++ b/pkg/erasure/erasure_encode.go @@ -0,0 +1,174 @@
+/*
+ * Minio Cloud Storage, (C) 2014 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package erasure
+
+// #include <stdlib.h>
+// #include "isa-l.h"
+// #include "ec_minio_common.h"
+import "C"
+import (
+	"errors"
+	"sync"
+	"unsafe"
+)
+
+// Block alignment
+const (
+	SIMDAlign = 32
+)
+
+// Params is a configuration set for building an encoder. It is created using ValidateParams().
+type Params struct {
+	K uint8
+	M uint8
+}
+
+// Erasure is an object used to encode and decode data.
+type Erasure struct {
+	params                   *Params
+	encodeMatrix, encodeTbls *C.uchar
+	decodeMatrix, decodeTbls *C.uchar
+	decodeIndex              *C.uint32_t
+	mutex                    *sync.Mutex
+}
+
+// ValidateParams creates a Params object.
+//
+// k is the number of data blocks and m the number of parity blocks;
+// together they determine the protection level. The matrix technique
+// (Cauchy or Vandermonde) is chosen internally by the encoder.
+//
+func ValidateParams(k, m uint8) (*Params, error) {
+	if k < 1 {
+		return nil, errors.New("k cannot be zero")
+	}
+
+	if m < 1 {
+		return nil, errors.New("m cannot be zero")
+	}
+
+	// Sum as int to avoid uint8 overflow in (k + m).
+	if int(k)+int(m) > 255 {
+		return nil, errors.New("(k + m) cannot be greater than Galois field GF(2^8) - 1")
+	}
+
+	return &Params{
+		K: k,
+		M: m,
+	}, nil
+}
+
+// NewErasure creates an encoder object with a given set of parameters.
+func NewErasure(ep *Params) *Erasure {
+	var k = C.int(ep.K)
+	var m = C.int(ep.M)
+
+	var encodeMatrix *C.uchar
+	var encodeTbls *C.uchar
+
+	C.minio_init_encoder(k, m, &encodeMatrix, &encodeTbls)
+
+	return &Erasure{
+		params:       ep,
+		encodeMatrix: encodeMatrix,
+		encodeTbls:   encodeTbls,
+		decodeMatrix: nil,
+		decodeTbls:   nil,
+		decodeIndex:  nil,
+		mutex:        new(sync.Mutex),
+	}
+}
+
+// GetEncodedBlocksLen - total length of all encoded blocks
+func GetEncodedBlocksLen(inputLen int, k, m uint8) (outputLen int) {
+	outputLen = GetEncodedBlockLen(inputLen, k) * int(k+m)
+	return outputLen
+}
+
+// GetEncodedBlockLen - length per block of encoded blocks
+func GetEncodedBlockLen(inputLen int, k uint8) (encodedOutputLen int) {
+	alignment := int(k) * SIMDAlign
+	remainder := inputLen % alignment
+
+	paddedInputLen := inputLen
+	if remainder != 0 {
+		paddedInputLen = inputLen + (alignment - remainder)
+	}
+	encodedOutputLen = paddedInputLen / int(k)
+	return encodedOutputLen
+}
+
+// Encode erasure codes a block of data into "k" data blocks and "m" parity blocks.
+// Output is [k+m][]blocks of data and parity slices.
+func (e *Erasure) Encode(inputData []byte) (encodedBlocks [][]byte, err error) {
+	e.mutex.Lock()
+	defer e.mutex.Unlock()
+
+	k := int(e.params.K) // "k" data blocks
+	m := int(e.params.M) // "m" parity blocks
+	n := k + m           // "n" total encoded blocks
+
+	// Length of a single encoded chunk.
+	// Total number of encoded chunks = "k" data + "m" parity blocks
+	encodedBlockLen := GetEncodedBlockLen(len(inputData), uint8(k))
+
+	// Length of total number of "k" data chunks
+	encodedDataBlocksLen := encodedBlockLen * k
+
+	// Length of extra padding required for the data blocks.
+	encodedDataBlocksPadLen := encodedDataBlocksLen - len(inputData)
+
+	// Extend inputData buffer to accommodate coded data blocks if necessary
+	if encodedDataBlocksPadLen > 0 {
+		padding := make([]byte, encodedDataBlocksPadLen)
+		// Expand with new padded blocks to the byte array
+		inputData = append(inputData, padding...)
+	}
+
+	// Extend inputData buffer to accommodate coded parity blocks
+	{ // Local Scope
+		encodedParityBlocksLen := encodedBlockLen * m
+		parityBlocks := make([]byte, encodedParityBlocksLen)
+		inputData = append(inputData, parityBlocks...)
+	}
+
+	// Allocate memory to the "encoded blocks" return buffer
+	encodedBlocks = make([][]byte, n) // Return buffer
+
+	// Necessary to bridge Go to the C world. C requires a 2D array of pointers to
+	// byte arrays. "encodedBlocks" is a 2D slice.
+	pointersToEncodedBlock := make([]*byte, n) // Pointers to encoded blocks.
+
+	// Copy data block slices to encoded block buffer
+	for i := 0; i < k; i++ {
+		encodedBlocks[i] = inputData[i*encodedBlockLen : (i+1)*encodedBlockLen]
+		pointersToEncodedBlock[i] = &encodedBlocks[i][0]
+	}
+
+	// Copy erasure block slices to encoded block buffer
+	for i := k; i < n; i++ {
+		encodedBlocks[i] = make([]byte, encodedBlockLen)
+		pointersToEncodedBlock[i] = &encodedBlocks[i][0]
+	}
+
+	// Erasure code the data into K data blocks and M parity
+	// blocks. Only the parity blocks are filled. Data blocks remain
+	// intact.
+ C.ec_encode_data(C.int(encodedBlockLen), C.int(k), C.int(m), e.encodeTbls, + (**C.uchar)(unsafe.Pointer(&pointersToEncodedBlock[:k][0])), // Pointers to data blocks + (**C.uchar)(unsafe.Pointer(&pointersToEncodedBlock[k:][0]))) // Pointers to parity blocks + + return encodedBlocks, nil +} diff --git a/pkg/erasure/erasure_test.go b/pkg/erasure/erasure_test.go new file mode 100644 index 000000000..d0195143f --- /dev/null +++ b/pkg/erasure/erasure_test.go @@ -0,0 +1,82 @@ +// +build !windows + +/* + * Minio Cloud Storage, (C) 2014 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package erasure + +import ( + "bytes" + "testing" + + . "gopkg.in/check.v1" +) + +type MySuite struct{} + +var _ = Suite(&MySuite{}) + +func Test(t *testing.T) { TestingT(t) } + +const ( + k = 10 + m = 5 +) + +func corruptChunks(chunks [][]byte, errorIndex []int) [][]byte { + for _, err := range errorIndex { + chunks[err] = nil + } + return chunks +} + +func (s *MySuite) TestEncodeDecodeFailure(c *C) { + ep, err := ValidateParams(k, m) + c.Assert(err, IsNil) + + data := []byte("Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.") + + e := NewErasure(ep) + chunks, err := e.Encode(data) + c.Assert(err, IsNil) + + errorIndex := []int{0, 3, 5, 9, 11, 13} + chunks = corruptChunks(chunks, errorIndex) + + _, err = e.Decode(chunks, len(data)) + c.Assert(err, Not(IsNil)) +} + +func (s *MySuite) TestEncodeDecodeSuccess(c *C) { + ep, err := ValidateParams(k, m) + c.Assert(err, IsNil) + + data := []byte("Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. 
It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum.")
+
+	e := NewErasure(ep)
+	chunks, err := e.Encode(data)
+	c.Assert(err, IsNil)
+
+	errorIndex := []int{0, 3, 5, 9, 13}
+	chunks = corruptChunks(chunks, errorIndex)
+
+	recoveredData, err := e.Decode(chunks, len(data))
+	c.Assert(err, IsNil)
+
+	if !bytes.Equal(data, recoveredData) {
+		c.Fatalf("Recovered data does not match the original data")
+	}
+} diff --git a/pkg/erasure/stdint.go b/pkg/erasure/stdint.go new file mode 100644 index 000000000..bca665a21 --- /dev/null +++ b/pkg/erasure/stdint.go @@ -0,0 +1,38 @@
+/*
+ * Minio Cloud Storage, (C) 2014 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package erasure
+
+//
+// int sizeInt()
+// {
+//         return sizeof(int);
+// }
+import "C"
+import "unsafe"
+
+var (
+	// See http://golang.org/ref/spec#Numeric_types
+	sizeInt = int(C.sizeInt())
+	// sizeInt8 is the byte size of an int8.
+	sizeInt8 = int(unsafe.Sizeof(int8(0)))
+	// sizeInt16 is the byte size of an int16.
+	sizeInt16 = int(unsafe.Sizeof(int16(0)))
+	// sizeInt32 is the byte size of an int32.
+	sizeInt32 = int(unsafe.Sizeof(int32(0)))
+	// sizeInt64 is the byte size of an int64.
+	sizeInt64 = int(unsafe.Sizeof(int64(0)))
+) diff --git a/pkg/fs/config.go b/pkg/fs/config.go index ea8aa3baf..8c6440043 100644 --- a/pkg/fs/config.go +++ b/pkg/fs/config.go @@ -17,8 +17,8 @@ package fs import ( - "github.com/minio/minio-xl/pkg/probe" - "github.com/minio/minio-xl/pkg/quick" + "github.com/minio/minio/pkg/probe" + "github.com/minio/minio/pkg/quick" ) var multipartsMetadataPath, bucketsMetadataPath string diff --git a/pkg/fs/errors.go b/pkg/fs/errors.go index df81424d1..550629480 100644 --- a/pkg/fs/errors.go +++ b/pkg/fs/errors.go @@ -39,16 +39,6 @@ func (e ExpiredPresignedRequest) Error() string { return "Presigned request already expired" } -// SignatureDoesNotMatch invalid signature -type SignatureDoesNotMatch struct { - SignatureSent string - SignatureCalculated string -} - -func (e SignatureDoesNotMatch) Error() string { - return "The request signature we calculated does not match the signature you provided" -} - // InvalidArgument invalid argument type InvalidArgument struct{} diff --git a/pkg/fs/fs-bucket-listobjects.go b/pkg/fs/fs-bucket-listobjects.go index a528d68e2..2004ed92d 100644 --- a/pkg/fs/fs-bucket-listobjects.go +++ b/pkg/fs/fs-bucket-listobjects.go @@ -25,8 +25,8 @@ import ( "strings" "time" - "github.com/minio/minio-xl/pkg/probe" "github.com/minio/minio/pkg/ioutils" + "github.com/minio/minio/pkg/probe" ) // listObjectsParams - list objects input parameters.
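Stepping back from the pkg/fs import churn for a moment: the pkg/erasure files above add up to a small public API (ValidateParams, NewErasure, Encode, Decode). The following is a minimal usage sketch against that API, assuming a working cgo build against Intel ISA-L; the sample data and the choice of dropped block indices are illustrative only, not part of this patch:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/minio/minio/pkg/erasure"
)

func main() {
	// k = 10 data blocks protected by m = 5 parity blocks.
	params, err := erasure.ValidateParams(10, 5)
	if err != nil {
		log.Fatal(err)
	}
	e := erasure.NewErasure(params)

	data := []byte("a block of application data")
	encodedBlocks, err := e.Encode(data) // returns k + m = 15 blocks
	if err != nil {
		log.Fatal(err)
	}

	// Lose any m or fewer blocks; missing blocks must stay positional as nil.
	encodedBlocks[0], encodedBlocks[7], encodedBlocks[12] = nil, nil, nil

	decoded, err := e.Decode(encodedBlocks, len(data))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(data, decoded)) // true
}

Decode needs the original length because Encode pads the input up to a multiple of k * SIMDAlign bytes; the trailing padding is stripped before the decoded data is returned.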
diff --git a/pkg/fs/fs-bucket.go b/pkg/fs/fs-bucket.go index f7390f356..0a6ac14ee 100644 --- a/pkg/fs/fs-bucket.go +++ b/pkg/fs/fs-bucket.go @@ -22,8 +22,8 @@ import ( "path/filepath" "strings" - "github.com/minio/minio-xl/pkg/probe" "github.com/minio/minio/pkg/disk" + "github.com/minio/minio/pkg/probe" ) /// Bucket Operations diff --git a/pkg/fs/fs-multipart.go b/pkg/fs/fs-multipart.go index 21620267c..1e6374a2d 100644 --- a/pkg/fs/fs-multipart.go +++ b/pkg/fs/fs-multipart.go @@ -33,12 +33,13 @@ import ( "strings" "time" - "github.com/minio/minio-xl/pkg/atomic" - "github.com/minio/minio-xl/pkg/crypto/sha256" - "github.com/minio/minio-xl/pkg/crypto/sha512" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/atomic" + "github.com/minio/minio/pkg/crypto/sha256" + "github.com/minio/minio/pkg/crypto/sha512" "github.com/minio/minio/pkg/disk" "github.com/minio/minio/pkg/mimedb" + "github.com/minio/minio/pkg/probe" + signV4 "github.com/minio/minio/pkg/signature" ) // isValidUploadID - is upload id. @@ -264,7 +265,7 @@ func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } // CreateObjectPart - create a part in a multipart session -func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum string, partID int, size int64, data io.Reader, signature *Signature) (string, *probe.Error) { +func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum string, partID int, size int64, data io.Reader, signature *signV4.Signature) (string, *probe.Error) { di, err := disk.GetInfo(fs.path) if err != nil { return "", probe.NewError(err) @@ -350,7 +351,7 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s } if !ok { partFile.CloseAndPurge() - return "", probe.NewError(SignatureDoesNotMatch{}) + return "", probe.NewError(signV4.SigDoesNotMatch{}) } } partFile.Close() @@ -398,7 +399,7 @@ func (fs Filesystem) CreateObjectPart(bucket, object, uploadID, expectedMD5Sum s } // CompleteMultipartUpload - complete a multipart upload and persist the data -func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) { +func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *signV4.Signature) (ObjectMetadata, *probe.Error) { // Check bucket name is valid. 
if !IsValidBucketName(bucket) { return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) @@ -447,7 +448,7 @@ func (fs Filesystem) CompleteMultipartUpload(bucket, object, uploadID string, da } if !ok { file.CloseAndPurge() - return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{}) + return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{}) } } completeMultipartUpload := &CompleteMultipartUpload{} diff --git a/pkg/fs/fs-object.go b/pkg/fs/fs-object.go index bd401b214..e9e000952 100644 --- a/pkg/fs/fs-object.go +++ b/pkg/fs/fs-object.go @@ -28,12 +28,13 @@ import ( "encoding/hex" "runtime" - "github.com/minio/minio-xl/pkg/atomic" - "github.com/minio/minio-xl/pkg/crypto/sha256" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/atomic" + "github.com/minio/minio/pkg/crypto/sha256" "github.com/minio/minio/pkg/disk" "github.com/minio/minio/pkg/ioutils" "github.com/minio/minio/pkg/mimedb" + "github.com/minio/minio/pkg/probe" + signV4 "github.com/minio/minio/pkg/signature" ) /// Object Operations @@ -198,7 +199,7 @@ func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) bool { } // CreateObject - create an object. -func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, signature *Signature) (ObjectMetadata, *probe.Error) { +func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size int64, data io.Reader, signature *signV4.Signature) (ObjectMetadata, *probe.Error) { di, e := disk.GetInfo(fs.path) if e != nil { return ObjectMetadata{}, probe.NewError(e) @@ -293,7 +294,7 @@ func (fs Filesystem) CreateObject(bucket, object, expectedMD5Sum string, size in } if !ok { file.CloseAndPurge() - return ObjectMetadata{}, probe.NewError(SignatureDoesNotMatch{}) + return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{}) } } file.Close() diff --git a/pkg/fs/fs.go b/pkg/fs/fs.go index 44462b8e7..a4a395a9c 100644 --- a/pkg/fs/fs.go +++ b/pkg/fs/fs.go @@ -22,7 +22,7 @@ import ( "sync" "time" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/probe" ) // Filesystem - local variables diff --git a/vendor/github.com/minio/minio-xl/pkg/minhttp/LICENSE.Facebook b/pkg/minhttp/LICENSE.Facebook similarity index 100% rename from vendor/github.com/minio/minio-xl/pkg/minhttp/LICENSE.Facebook rename to pkg/minhttp/LICENSE.Facebook diff --git a/pkg/minhttp/LICENSE.Minio b/pkg/minhttp/LICENSE.Minio new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/pkg/minhttp/LICENSE.Minio @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/minio/minio-xl/pkg/minhttp/http_nix.go b/pkg/minhttp/http_nix.go similarity index 99% rename from vendor/github.com/minio/minio-xl/pkg/minhttp/http_nix.go rename to pkg/minhttp/http_nix.go index 93d7f37bb..0b7017c56 100644 --- a/vendor/github.com/minio/minio-xl/pkg/minhttp/http_nix.go +++ b/pkg/minhttp/http_nix.go @@ -34,7 +34,7 @@ import ( "time" "github.com/facebookgo/httpdown" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/probe" ) // An app contains one or more servers and their associated configuration. diff --git a/vendor/github.com/minio/minio-xl/pkg/minhttp/http_windows.go b/pkg/minhttp/http_windows.go similarity index 99% rename from vendor/github.com/minio/minio-xl/pkg/minhttp/http_windows.go rename to pkg/minhttp/http_windows.go index 223418a18..103e922d6 100644 --- a/vendor/github.com/minio/minio-xl/pkg/minhttp/http_windows.go +++ b/pkg/minhttp/http_windows.go @@ -34,7 +34,7 @@ import ( "time" "github.com/facebookgo/httpdown" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/probe" ) // An app contains one or more servers and their associated configuration. diff --git a/vendor/github.com/minio/minio-xl/pkg/minhttp/kill_windows.go b/pkg/minhttp/kill_windows.go similarity index 98% rename from vendor/github.com/minio/minio-xl/pkg/minhttp/kill_windows.go rename to pkg/minhttp/kill_windows.go index 1862867a4..38c26cf1c 100644 --- a/vendor/github.com/minio/minio-xl/pkg/minhttp/kill_windows.go +++ b/pkg/minhttp/kill_windows.go @@ -1,4 +1,4 @@ -// !build windows +// +build windows /* * Minio Cloud Storage, (C) 2015 Minio, Inc. diff --git a/vendor/github.com/minio/minio-xl/pkg/minhttp/listen.go b/pkg/minhttp/listen.go similarity index 100% rename from vendor/github.com/minio/minio-xl/pkg/minhttp/listen.go rename to pkg/minhttp/listen.go diff --git a/vendor/github.com/minio/minio-xl/pkg/minhttp/net.go b/pkg/minhttp/net.go similarity index 99% rename from vendor/github.com/minio/minio-xl/pkg/minhttp/net.go rename to pkg/minhttp/net.go index 8bc1fdba9..66398e5ce 100644 --- a/vendor/github.com/minio/minio-xl/pkg/minhttp/net.go +++ b/pkg/minhttp/net.go @@ -25,7 +25,7 @@ import ( "strings" "sync" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/probe" ) // This package is a fork https://github.com/facebookgo/grace @@ -34,7 +34,7 @@ import ( // This package provides a family of Listen functions that either open a // fresh connection or provide an inherited connection from when the process -// was started. This behaves like their counterparts in the net pacakge, but +// was started. This behaves like their counterparts in the net package, but // transparently provide support for graceful restarts without dropping // connections. This is provided in a systemd socket activation compatible form // to allow using socket activation. diff --git a/vendor/github.com/minio/minio-xl/pkg/probe/probe.go b/pkg/probe/probe.go similarity index 100% rename from vendor/github.com/minio/minio-xl/pkg/probe/probe.go rename to pkg/probe/probe.go diff --git a/vendor/github.com/minio/minio-xl/pkg/probe/probe_test.go b/pkg/probe/probe_test.go similarity index 93% rename from vendor/github.com/minio/minio-xl/pkg/probe/probe_test.go rename to pkg/probe/probe_test.go index a5e4db2aa..6f064843a 100644 --- a/vendor/github.com/minio/minio-xl/pkg/probe/probe_test.go +++ b/pkg/probe/probe_test.go @@ -19,7 +19,7 @@ import ( "os" "testing" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/probe" . 
"gopkg.in/check.v1" ) @@ -44,7 +44,7 @@ func testDummy2() *probe.Error { func (s *MySuite) TestProbe(c *C) { probe.Init() // Set project's root source path. - probe.SetAppInfo("Release-Tag", "RELEASE.Sat-19-Sep-2015-06-15-16-GMT") + probe.SetAppInfo("Commit-ID", "7390cc957239") es := testDummy2().Trace("TopOfStack") // Uncomment the following Println to visually test probe call trace. // fmt.Println("Expecting a simulated error here.", es) diff --git a/vendor/github.com/minio/minio-xl/pkg/probe/wrapper.go b/pkg/probe/wrapper.go similarity index 100% rename from vendor/github.com/minio/minio-xl/pkg/probe/wrapper.go rename to pkg/probe/wrapper.go diff --git a/vendor/github.com/minio/minio-xl/pkg/quick/errorutil.go b/pkg/quick/errorutil.go similarity index 100% rename from vendor/github.com/minio/minio-xl/pkg/quick/errorutil.go rename to pkg/quick/errorutil.go diff --git a/vendor/github.com/minio/minio-xl/pkg/quick/quick.go b/pkg/quick/quick.go similarity index 97% rename from vendor/github.com/minio/minio-xl/pkg/quick/quick.go rename to pkg/quick/quick.go index ce4647eb5..471939eb1 100644 --- a/vendor/github.com/minio/minio-xl/pkg/quick/quick.go +++ b/pkg/quick/quick.go @@ -30,8 +30,8 @@ import ( "sync" "github.com/fatih/structs" - "github.com/minio/minio-xl/pkg/atomic" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/atomic" + "github.com/minio/minio/pkg/probe" ) // Config - generic config interface functions @@ -51,7 +51,8 @@ type config struct { lock *sync.RWMutex } -// CheckData - checks the validity of config data. Data sould be of type struct and contain a string type field called "Version" +// CheckData - checks the validity of config data. Data should be of +// type struct and contain a string type field called "Version". func CheckData(data interface{}) *probe.Error { if !structs.IsStruct(data) { return probe.NewError(fmt.Errorf("Invalid argument type. Expecing \"struct\" type.")) diff --git a/vendor/github.com/minio/minio-xl/pkg/quick/quick_test.go b/pkg/quick/quick_test.go similarity index 99% rename from vendor/github.com/minio/minio-xl/pkg/quick/quick_test.go rename to pkg/quick/quick_test.go index 7c0b05426..d5afa5db5 100644 --- a/vendor/github.com/minio/minio-xl/pkg/quick/quick_test.go +++ b/pkg/quick/quick_test.go @@ -22,7 +22,7 @@ import ( "os" "testing" - "github.com/minio/minio-xl/pkg/quick" + "github.com/minio/minio/pkg/quick" . "gopkg.in/check.v1" ) diff --git a/pkg/signature/errors.go b/pkg/signature/errors.go new file mode 100644 index 000000000..961a42390 --- /dev/null +++ b/pkg/signature/errors.go @@ -0,0 +1,48 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package signature
+
+// MissingDateHeader date header missing
+type MissingDateHeader struct{}
+
+func (e MissingDateHeader) Error() string {
+	return "Missing date header"
+}
+
+// MissingExpiresQuery expires query string missing
+type MissingExpiresQuery struct{}
+
+func (e MissingExpiresQuery) Error() string {
+	return "Missing expires query string"
+}
+
+// ExpiredPresignedRequest request already expired
+type ExpiredPresignedRequest struct{}
+
+func (e ExpiredPresignedRequest) Error() string {
+	return "Presigned request already expired"
+}
+
+// SigDoesNotMatch invalid signature
+type SigDoesNotMatch struct {
+	SignatureSent       string
+	SignatureCalculated string
+}
+
+func (e SigDoesNotMatch) Error() string {
+	return "The request signature we calculated does not match the signature you provided"
+} diff --git a/pkg/fs/postpolicyform.go b/pkg/signature/postpolicyform.go similarity index 98% rename from pkg/fs/postpolicyform.go rename to pkg/signature/postpolicyform.go index d06bf7761..982cc5048 100644 --- a/pkg/fs/postpolicyform.go +++ b/pkg/signature/postpolicyform.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package fs +package signature import ( "encoding/json" @@ -22,7 +22,7 @@ import ( "reflect" "time" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/probe" ) // toString - Safely convert interface to string without causing panic. diff --git a/pkg/fs/signature.go b/pkg/signature/signature-v4.go similarity index 98% rename from pkg/fs/signature.go rename to pkg/signature/signature-v4.go index 5ef933b94..5d8a10a64 100644 --- a/pkg/fs/signature.go +++ b/pkg/signature/signature-v4.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package fs +package signature import ( "bytes" @@ -29,8 +29,8 @@ import ( "time" "unicode/utf8" - "github.com/minio/minio-xl/pkg/crypto/sha256" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/crypto/sha256" + "github.com/minio/minio/pkg/probe" ) // Signature - local variables @@ -243,7 +243,8 @@ func (r Signature) getScope(t time.Time) string { func (r Signature) getStringToSign(canonicalRequest string, t time.Time) string { stringToSign := authHeaderPrefix + "\n" + t.Format(iso8601Format) + "\n" stringToSign = stringToSign + r.getScope(t) + "\n" - stringToSign = stringToSign + hex.EncodeToString(sha256.Sum256([]byte(canonicalRequest))) + canonicalRequestBytes := sha256.Sum256([]byte(canonicalRequest)) + stringToSign = stringToSign + hex.EncodeToString(canonicalRequestBytes[:]) return stringToSign } diff --git a/pkg/tasker/commands.go b/pkg/tasker/commands.go new file mode 100644 index 000000000..032bd566c --- /dev/null +++ b/pkg/tasker/commands.go @@ -0,0 +1,44 @@
+/*
+ * Minio Cloud Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tasker
+
+// Command is a number that uniquely identifies a command function.
+type Command uint8
+
+// Enumerate the task commands.
+const (
+	// CmdNOOP does nothing. It is a default placeholder.
An uninitialized variable of this type defaults to the NOOP command.
+	CmdNOOP Command = iota
+	// CmdSignalEnd gracefully ends the current task. Never-ending tasks (loops) and batched jobs will not begin the next iteration,
+	// but may run the current one to completion.
+	CmdSignalEnd
+	// CmdSignalAbort ends the current task immediately, though it may still perform quick cleanup of dangling resources.
+	CmdSignalAbort
+	// CmdSignalSuspend suspends the current task.
+	CmdSignalSuspend
+	// CmdSignalResume resumes a suspended task.
+	CmdSignalResume
+	// CmdPriorityLow is optimized to conserve resources and complete the task at a slow pace. This option is ideal for batch-processed tasks.
+	CmdPriorityLow
+	// CmdPriorityMedium is the default priority. It is a balanced option between resources and speed.
+	CmdPriorityMedium
+	// CmdPriorityHigh is optimized for speed. This option is ideal for short-lived, latency-sensitive tasks (such as metadata operations). Use this option wisely.
+	CmdPriorityHigh
+	// CmdPrioritySuper is an exclusive priority. All tasks with priority lower than Super (including High) are paused
+	// temporarily until this task completes. Anytime you consider using this priority level, please seek approval.
+	CmdPrioritySuper
+) diff --git a/pkg/tasker/handle.go b/pkg/tasker/handle.go new file mode 100644 index 000000000..c3330dede --- /dev/null +++ b/pkg/tasker/handle.go @@ -0,0 +1,56 @@
+/*
+ * Minio Cloud Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tasker
+
+import "github.com/minio/minio/pkg/probe"
+
+// Handle is, as the name suggests, a handle (self-reference) to its
+// own task structure. A task has limited privileges over itself. Only the
+// task controller (TaskCtl) can manage the task by sending commands to
+// the task over channels.
+type Handle struct {
+	this     taskRef
+	cmdCh    <-chan Command // Channel to receive commands from TaskCtl.
+	statusCh chan<- status  // Channel to send completion status and error (if any) to TaskCtl.
+	closeCh  chan<- taskRef // Channel to notify the TaskCtl about ending this task.
+}
+
+// Listen returns a channel to receive commands.
+func (t Handle) Listen() <-chan Command {
+	return t.cmdCh
+}
+
+// StatusDone acknowledges successful completion of a command.
+func (t Handle) StatusDone() {
+	t.statusCh <- status{code: statusDone, err: nil}
+}
+
+// StatusBusy rejects a command with busy status.
+func (t Handle) StatusBusy() {
+	t.statusCh <- status{code: statusBusy, err: nil}
+}
+
+// StatusFail returns failure status.
+func (t Handle) StatusFail(err *probe.Error) {
+	t.statusCh <- status{code: statusFail, err: err}
+}
+
+// Close notifies the TaskCtl about the end of this Task. The owner of the
+// task must invoke Close() when it is done performing its job.
+func (t Handle) Close() {
+	t.closeCh <- t.this
+} diff --git a/pkg/tasker/status.go b/pkg/tasker/status.go new file mode 100644 index 000000000..f1ed47eb3 --- /dev/null +++ b/pkg/tasker/status.go @@ -0,0 +1,35 @@
+/*
+ * Minio Cloud Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tasker
+
+import "github.com/minio/minio/pkg/probe"
+
+// statusCode denotes the completion status of a command.
+type statusCode int8
+
+// Enumerate task return status codes.
+const (
+	statusDone statusCode = iota
+	statusBusy
+	statusFail
+)
+
+// status holds the completion status and error (if any) of a command.
+type status struct {
+	code statusCode   // Completion code.
+	err  *probe.Error // Error if any.
+} diff --git a/pkg/tasker/task.go b/pkg/tasker/task.go new file mode 100644 index 000000000..a3aa7ea63 --- /dev/null +++ b/pkg/tasker/task.go @@ -0,0 +1,103 @@
+/*
+ * Minio Cloud Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tasker
+
+import (
+	"container/list"
+	"sync"
+)
+
+// NOTE: Task is a private entity. It is created and managed by TaskCtl
+// entirely. Only TaskCtl and Handle objects are exposed outside.
+
+// taskRef is a unique reference ID to a task. It is assigned by the
+// TaskCtl during the creation of a task. All taskRef variables are
+// named "this".
+type taskRef *list.Element
+
+// Task is an abstract concept built on top of Go routines and
+// channels. Tasks themselves are expected to co-operate and comply with
+// the TaskCtl commands.
+
+type task struct {
+	mutex *sync.Mutex
+
+	this     taskRef      // Reference to task entry in the TaskCtl's task list.
+	name     string       // Free form name.
+	priority Command      // Current priority.
+	cmdCh    chan Command // Channel to receive commands from TaskCtl.
+	statusCh chan status  // Channel to send completion status and error (if any) to TaskCtl.
+	closeCh  chan taskRef // Channel to notify the TaskCtl about ending this task.
+}
+
+// newTask creates a new task structure and returns a handle to
+// it. Only the task controller has access to the task structure. The
+// caller routine only receives a handle to its task structure. A task
+// handle is like a reference to the task itself. The caller is expected
+// to listen for commands from the task controller and comply co-operatively.
+// this: a unique task reference assigned by the TaskCtl.
+// name: Free form name of the task. Eg. "Late Night Disk Scrubber".
+func newTask(name string) task {
+	return task{
+		// this: Is set by the TaskCtl's NewTask function.
+		mutex:    &sync.Mutex{},
+		name:     name,
+		priority: CmdPriorityMedium,
+		cmdCh:    make(chan Command),
+		statusCh: make(chan status),
+		closeCh:  make(chan taskRef),
+	}
+}
+
+// getHandle returns a handle to the task. The handle has limited access to the task structure and is safe to expose.
+func (t task) getHandle() Handle {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	// Make a handle with limited access to channels (only send or receive).
+	return Handle{
+		cmdCh:    t.cmdCh,
+		statusCh: t.statusCh,
+		closeCh:  t.closeCh,
+	}
+}
+
+// command sends a command code to the task and returns its completion status.
+func (t task) command(cmd Command) status {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	t.cmdCh <- cmd
+	return <-t.statusCh
+}
+
+// close releases all the resources held by this task.
+func (t task) close() {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	// A task can be ended in 2 ways.
+	// 1) The calling application invokes Handle.Close().
+	// 2) TaskCtl.Shutdown() ends the task's life.
+	// In either case, task.close() is invoked only via the
+	// TaskCtl. Handle.Close() only sends a message to the TaskCtl to
+	// initiate a close call.
+
+	close(t.cmdCh)
+	close(t.statusCh)
+	close(t.closeCh)
+}
diff --git a/pkg/tasker/taskctl.go b/pkg/tasker/taskctl.go
new file mode 100644
index 000000000..ab63f7b2e
--- /dev/null
+++ b/pkg/tasker/taskctl.go
@@ -0,0 +1,164 @@
+/*
+ * Minio Cloud Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tasker
+
+import (
+	"container/list"
+	"sync"
+)
+
+// TaskCtl (Task Controller) is a framework to create and manage
+// tasks.
+type TaskCtl struct {
+	mutex *sync.Mutex // Lock
+	// List of tasks managed by this task controller.
+	tasks *list.List
+}
+
+// New creates a new TaskCtl to create and control a collection of tasks.
+// A single application can create multiple task controllers to manage different sets of tasks separately.
+func New(name string) *TaskCtl {
+	return &TaskCtl{
+		mutex: &sync.Mutex{},
+		tasks: list.New(),
+	}
+}
+
+// NewTask creates a new task structure and returns a handle to it. Only the task controller
+// has access to the task structure. The caller routine only receives a handle to its task structure.
+// The task handle is like a self-reference. The caller is expected to listen for commands from
+// the task controller and comply co-operatively.
+func (tc *TaskCtl) NewTask(name string) Handle {
+	tc.mutex.Lock()
+	defer tc.mutex.Unlock()
+
+	// Create a new task.
+	tsk := newTask(name)
+
+	// Register this task in the TaskCtl's tasklist and save the reference.
+	tsk.this = tc.tasks.PushBack(tsk)
+
+	// Free the task from the tasklist upon a close call.
+	go func() {
+		// Release the task's resources upon return of this function.
+		defer tsk.close()
+
+		// Will be notified here upon the task's end of life.
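// Putting the pieces together, a hedged end-to-end sketch (the controller
// and task names are invented; Suspend, Resume, and Shutdown are defined
// later in this file):
//
//	tc := New("xl-maintenance")
//	handle := tc.NewTask("late night disk scrubber")
//	go runScrubber(handle) // a cooperative worker, as sketched in handle.go
//	tc.Suspend()           // park all registered tasks
//	tc.Resume()            // wake them up again
//	tc.Shutdown()          // gracefully end every task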
+		this := <-tsk.closeCh
+
+		tc.mutex.Lock()
+		defer tc.mutex.Unlock()
+
+		// Release the task structure from the task list.
+		tc.tasks.Remove(this)
+	}()
+
+	// Return a handle to this task.
+	return tsk.getHandle()
+}
+
+// Shutdown ends all tasks, including the suspended ones.
+func (tc *TaskCtl) Shutdown() {
+	tc.mutex.Lock()
+	defer tc.mutex.Unlock()
+
+	var wg sync.WaitGroup
+
+	// End all tasks.
+	for e := tc.tasks.Front(); e != nil; e = e.Next() {
+		wg.Add(1)
+		thisTask := e.Value.(task) // Make a local copy for the go routine.
+		// End tasks in the background. The flow of events from here is as follows: thisTask.handle.Close() -> tc.NewTask() -> this.task.close().
+		go func() {
+			thisTask.getHandle().Close()
+			wg.Done()
+		}()
+	}
+
+	wg.Wait() // Wait for all tasks to end gracefully.
+
+	// Reset the task pool.
+	tc.tasks = nil
+}
+
+// Suspend puts all tasks to sleep.
+func (tc *TaskCtl) Suspend() bool {
+	tc.mutex.Lock()
+	defer tc.mutex.Unlock()
+
+	var wg sync.WaitGroup
+
+	// If any one of the tasks fails to suspend, its entry in statusAll will record the failure.
+	statusAll := make([]status, tc.tasks.Len())
+
+	// Suspend all tasks.
+	i := 0
+	for e := tc.tasks.Front(); e != nil; e = e.Next() {
+		wg.Add(1)
+		locTask := e.Value.(task) // Make a local copy for the go routine.
+		locI := i                 // local i
+		// Suspend a task in the background.
+		go func(locI int) {
+			defer wg.Done()
+			statusAll[locI] = locTask.command(CmdSignalSuspend)
+		}(locI)
+		i++
+	}
+
+	wg.Wait() // Wait for all tasks to suspend gracefully.
+
+	for _, st := range statusAll {
+		if st.code != statusDone {
+			return false
+		}
+	}
+	return true
+}
+
+// Resume wakes up all suspended tasks from sleep.
+func (tc *TaskCtl) Resume() bool {
+	tc.mutex.Lock()
+	defer tc.mutex.Unlock()
+
+	var wg sync.WaitGroup
+
+	// If any one of the tasks fails to resume, its entry in statusAll will record the failure.
+	statusAll := make([]status, tc.tasks.Len())
+
+	i := 0
+	// Resume all suspended tasks.
+	for e := tc.tasks.Front(); e != nil; e = e.Next() {
+		wg.Add(1)
+		locTask := e.Value.(task) // Make a local copy for the go routine.
+		locI := i                 // local i
+		// Resume a task in the background.
+		go func(locI int) {
+			defer wg.Done()
+			statusAll[locI] = locTask.command(CmdSignalResume)
+		}(locI)
+		i++
+	}
+	wg.Wait() // Wait for all tasks to resume.
+
+	for _, st := range statusAll {
+		if st.code != statusDone {
+			return false
+		}
+	}
+	return true
+}
diff --git a/pkg/tasker/taskctl_test.go b/pkg/tasker/taskctl_test.go
new file mode 100644
index 000000000..4dae9c0ea
--- /dev/null
+++ b/pkg/tasker/taskctl_test.go
@@ -0,0 +1,38 @@
+/*
+ * Minio Cloud Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package tasker_test
+
+import (
+	"testing"
+
+	"github.com/minio/minio/pkg/tasker"
+
+	.
"gopkg.in/check.v1" +) + +func Test(t *testing.T) { TestingT(t) } + +type MySuite struct{} + +var _ = Suite(&MySuite{}) + +func (s *MySuite) TestCheckData(c *C) { + testTasks := tasker.New("Test Task") + testTasks.Shutdown() + // c.Assert(err, Not(IsNil)) +} diff --git a/pkg/xl/LICENSE b/pkg/xl/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/pkg/xl/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/pkg/xl/README.md b/pkg/xl/README.md new file mode 100644 index 000000000..36ac7631d --- /dev/null +++ b/pkg/xl/README.md @@ -0,0 +1,3 @@ +# XL + +xl - XL distributed erasure coded on-disk format released under [Apache license v2](./LICENSE). diff --git a/pkg/xl/acl.go b/pkg/xl/acl.go new file mode 100644 index 000000000..796b5954a --- /dev/null +++ b/pkg/xl/acl.go @@ -0,0 +1,47 @@ +package xl + +// BucketACL - bucket level access control +type BucketACL string + +// different types of ACL's currently supported for buckets +const ( + BucketPrivate = BucketACL("private") + BucketPublicRead = BucketACL("public-read") + BucketPublicReadWrite = BucketACL("public-read-write") +) + +func (b BucketACL) String() string { + return string(b) +} + +// IsPrivate - is acl Private +func (b BucketACL) IsPrivate() bool { + return b == BucketACL("private") +} + +// IsPublicRead - is acl PublicRead +func (b BucketACL) IsPublicRead() bool { + return b == BucketACL("public-read") +} + +// IsPublicReadWrite - is acl PublicReadWrite +func (b BucketACL) IsPublicReadWrite() bool { + return b == BucketACL("public-read-write") +} + +// IsValidBucketACL - is provided acl string supported +func IsValidBucketACL(acl string) bool { + switch acl { + case "private": + fallthrough + case "public-read": + fallthrough + case "public-read-write": + return true + case "": + // by default its "private" + return true + default: + return false + } +} diff --git a/pkg/xl/block/block.go b/pkg/xl/block/block.go new file mode 100644 index 000000000..3a913b299 --- /dev/null +++ b/pkg/xl/block/block.go @@ -0,0 +1,196 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package block + +import ( + "errors" + "os" + "path/filepath" + "sync" + "syscall" + + "github.com/minio/minio/pkg/atomic" + "github.com/minio/minio/pkg/disk" + "github.com/minio/minio/pkg/probe" +) + +// Block container for block disk parameters +type Block struct { + lock *sync.Mutex + path string + fsInfo disk.Info +} + +// ErrInvalidArgument - invalid argument. 
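// An illustrative sketch (assumed usage, not part of this patch) of the
// Block API defined below: open a disk rooted at a path, create a
// directory, then write a file through the atomic writer. The paths are
// hypothetical.
//
//	d, perr := New("/mnt/disk1")
//	if perr == nil {
//		d.MakeDir("bucket$0$0")
//		if f, perr := d.CreateFile("bucket$0$0/object/data"); perr == nil {
//			f.Write([]byte("hello"))
//			f.Close() // the atomic.File is renamed into place on Close
//		}
//	}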
+var ErrInvalidArgument = errors.New("Invalid argument")
+
+// New - instantiate new disk
+func New(diskPath string) (Block, *probe.Error) {
+	if diskPath == "" {
+		return Block{}, probe.NewError(ErrInvalidArgument)
+	}
+	st, err := os.Stat(diskPath)
+	if err != nil {
+		return Block{}, probe.NewError(err)
+	}
+
+	if !st.IsDir() {
+		return Block{}, probe.NewError(syscall.ENOTDIR)
+	}
+	info, err := disk.GetInfo(diskPath)
+	if err != nil {
+		return Block{}, probe.NewError(err)
+	}
+	disk := Block{
+		lock:   &sync.Mutex{},
+		path:   diskPath,
+		fsInfo: info,
+	}
+	return disk, nil
+}
+
+// IsUsable - is disk usable, alive
+func (d Block) IsUsable() bool {
+	_, err := os.Stat(d.path)
+	if err != nil {
+		return false
+	}
+	return true
+}
+
+// GetPath - get root disk path
+func (d Block) GetPath() string {
+	return d.path
+}
+
+// GetFSInfo - get disk filesystem and its usage information
+func (d Block) GetFSInfo() disk.Info {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+
+	info, err := disk.GetInfo(d.path)
+	if err != nil {
+		return d.fsInfo
+	}
+	d.fsInfo = info
+	return info
+}
+
+// MakeDir - make a directory inside disk root path
+func (d Block) MakeDir(dirname string) *probe.Error {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+	if err := os.MkdirAll(filepath.Join(d.path, dirname), 0700); err != nil {
+		return probe.NewError(err)
+	}
+	return nil
+}
+
+// ListDir - list a directory inside disk root path, get only directories
+func (d Block) ListDir(dirname string) ([]os.FileInfo, *probe.Error) {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+
+	dir, err := os.Open(filepath.Join(d.path, dirname))
+	if err != nil {
+		return nil, probe.NewError(err)
+	}
+	defer dir.Close()
+	contents, err := dir.Readdir(-1)
+	if err != nil {
+		return nil, probe.NewError(err)
+	}
+	var directories []os.FileInfo
+	for _, content := range contents {
+		// Include only directories, ignore everything else
+		if content.IsDir() {
+			directories = append(directories, content)
+		}
+	}
+	return directories, nil
+}
+
+// ListFiles - list a directory inside disk root path, get only files
+func (d Block) ListFiles(dirname string) ([]os.FileInfo, *probe.Error) {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+
+	dir, err := os.Open(filepath.Join(d.path, dirname))
+	if err != nil {
+		return nil, probe.NewError(err)
+	}
+	defer dir.Close()
+	contents, err := dir.Readdir(-1)
+	if err != nil {
+		return nil, probe.NewError(err)
+	}
+	var files []os.FileInfo
+	for _, content := range contents {
+		// Include only regular files, ignore everything else
+		if content.Mode().IsRegular() {
+			files = append(files, content)
+		}
+	}
+	return files, nil
+}
+
+// CreateFile - create a file inside disk root path, returns a custom atomic.File which provides atomic writes
+func (d Block) CreateFile(filename string) (*atomic.File, *probe.Error) {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+
+	if filename == "" {
+		return nil, probe.NewError(ErrInvalidArgument)
+	}
+
+	f, err := atomic.FileCreate(filepath.Join(d.path, filename))
+	if err != nil {
+		return nil, probe.NewError(err)
+	}
+
+	return f, nil
+}
+
+// Open - read a file inside disk root path
+func (d Block) Open(filename string) (*os.File, *probe.Error) {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+
+	if filename == "" {
+		return nil, probe.NewError(ErrInvalidArgument)
+	}
+	dataFile, err := os.Open(filepath.Join(d.path, filename))
+	if err != nil {
+		return nil, probe.NewError(err)
+	}
+	return dataFile, nil
+}
+
+// OpenFile - Use with caution
+func (d Block) OpenFile(filename string, flags int, perm os.FileMode) (*os.File, *probe.Error) {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+
+	if filename == "" {
+		return nil, probe.NewError(ErrInvalidArgument)
+	}
+	dataFile, err := os.OpenFile(filepath.Join(d.path, filename), flags, perm)
+	if err != nil {
+		return nil, probe.NewError(err)
+	}
+	return dataFile, nil
+}
diff --git a/pkg/xl/block/block_test.go b/pkg/xl/block/block_test.go
new file mode 100644
index 000000000..0840a2317
--- /dev/null
+++ b/pkg/xl/block/block_test.go
@@ -0,0 +1,83 @@
+/*
+ * Minio Cloud Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package block
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	. "gopkg.in/check.v1"
+)
+
+func TestDisk(t *testing.T) { TestingT(t) }
+
+type MyDiskSuite struct {
+	path string
+	d    Block
+}
+
+var _ = Suite(&MyDiskSuite{})
+
+func (s *MyDiskSuite) SetUpSuite(c *C) {
+	path, err := ioutil.TempDir(os.TempDir(), "disk-")
+	c.Assert(err, IsNil)
+	s.path = path
+	d, perr := New(s.path)
+	c.Assert(perr, IsNil)
+	s.d = d
+}
+
+func (s *MyDiskSuite) TearDownSuite(c *C) {
+	os.RemoveAll(s.path)
+}
+
+func (s *MyDiskSuite) TestDiskInfo(c *C) {
+	c.Assert(s.path, Equals, s.d.GetPath())
+	fsInfo := s.d.GetFSInfo()
+	c.Assert(fsInfo.FSType, Not(Equals), "UNKNOWN")
+}
+
+func (s *MyDiskSuite) TestDiskCreateDir(c *C) {
+	c.Assert(s.d.MakeDir("hello"), IsNil)
+}
+
+func (s *MyDiskSuite) TestDiskCreateFile(c *C) {
+	f, err := s.d.CreateFile("hello1")
+	c.Assert(err, IsNil)
+	c.Assert(f.Name(), Not(Equals), filepath.Join(s.path, "hello1"))
+	// close renames the file
+	f.Close()
+
+	// Open should be a success
+	_, err = s.d.Open("hello1")
+	c.Assert(err, IsNil)
+}
+
+func (s *MyDiskSuite) TestDiskOpen(c *C) {
+	f1, err := s.d.CreateFile("hello2")
+	c.Assert(err, IsNil)
+	c.Assert(f1.Name(), Not(Equals), filepath.Join(s.path, "hello2"))
+	// close renames the file
+	f1.Close()
+
+	f2, err := s.d.Open("hello2")
+	c.Assert(err, IsNil)
+	c.Assert(f2.Name(), Equals, filepath.Join(s.path, "hello2"))
+	defer f2.Close()
+}
diff --git a/pkg/xl/bucket.go b/pkg/xl/bucket.go
new file mode 100644
index 000000000..850850ca3
--- /dev/null
+++ b/pkg/xl/bucket.go
@@ -0,0 +1,639 @@
+/*
+ * Minio Cloud Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package xl + +import ( + "bytes" + "fmt" + "hash" + "io" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "crypto/md5" + "encoding/hex" + "encoding/json" + + "github.com/minio/minio/pkg/crypto/sha256" + "github.com/minio/minio/pkg/crypto/sha512" + "github.com/minio/minio/pkg/probe" + signV4 "github.com/minio/minio/pkg/signature" + "github.com/minio/minio/pkg/xl/block" +) + +const ( + blockSize = 10 * 1024 * 1024 +) + +// internal struct carrying bucket specific information +type bucket struct { + name string + acl string + time time.Time + xlName string + nodes map[string]node + lock *sync.Mutex +} + +// newBucket - instantiate a new bucket +func newBucket(bucketName, aclType, xlName string, nodes map[string]node) (bucket, BucketMetadata, *probe.Error) { + if strings.TrimSpace(bucketName) == "" || strings.TrimSpace(xlName) == "" { + return bucket{}, BucketMetadata{}, probe.NewError(InvalidArgument{}) + } + + b := bucket{} + t := time.Now().UTC() + b.name = bucketName + b.acl = aclType + b.time = t + b.xlName = xlName + b.nodes = nodes + b.lock = new(sync.Mutex) + + metadata := BucketMetadata{} + metadata.Version = bucketMetadataVersion + metadata.Name = bucketName + metadata.ACL = BucketACL(aclType) + metadata.Created = t + metadata.Metadata = make(map[string]string) + metadata.BucketObjects = make(map[string]struct{}) + + return b, metadata, nil +} + +// getBucketName - +func (b bucket) getBucketName() string { + return b.name +} + +// getBucketMetadataReaders - +func (b bucket) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error) { + readers := make(map[int]io.ReadCloser) + var disks map[int]block.Block + var err *probe.Error + for _, node := range b.nodes { + disks, err = node.ListDisks() + if err != nil { + return nil, err.Trace() + } + } + var bucketMetaDataReader io.ReadCloser + for order, disk := range disks { + bucketMetaDataReader, err = disk.Open(filepath.Join(b.xlName, bucketMetadataConfig)) + if err != nil { + continue + } + readers[order] = bucketMetaDataReader + } + if err != nil { + return nil, err.Trace() + } + return readers, nil +} + +// getBucketMetadata - +func (b bucket) getBucketMetadata() (*AllBuckets, *probe.Error) { + metadata := new(AllBuckets) + var readers map[int]io.ReadCloser + { + var err *probe.Error + readers, err = b.getBucketMetadataReaders() + if err != nil { + return nil, err.Trace() + } + } + for _, reader := range readers { + defer reader.Close() + } + var err error + for _, reader := range readers { + jenc := json.NewDecoder(reader) + if err = jenc.Decode(metadata); err == nil { + return metadata, nil + } + } + return nil, probe.NewError(err) +} + +// GetObjectMetadata - get metadata for an object +func (b bucket) GetObjectMetadata(objectName string) (ObjectMetadata, *probe.Error) { + b.lock.Lock() + defer b.lock.Unlock() + return b.readObjectMetadata(normalizeObjectName(objectName)) +} + +// ListObjects - list all objects +func (b bucket) ListObjects(prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, *probe.Error) { + b.lock.Lock() + defer b.lock.Unlock() + if maxkeys <= 0 { + maxkeys = 1000 + } + var isTruncated bool + var objects []string + bucketMetadata, err := b.getBucketMetadata() + if err != nil { + return ListObjectsResults{}, err.Trace() + } + for objectName := range bucketMetadata.Buckets[b.getBucketName()].Multiparts { + if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) { + if objectName > marker { + objects = append(objects, objectName) + } + } + } + for objectName := range 
bucketMetadata.Buckets[b.getBucketName()].BucketObjects { + if strings.HasPrefix(objectName, strings.TrimSpace(prefix)) { + if objectName > marker { + objects = append(objects, objectName) + } + } + } + if strings.TrimSpace(prefix) != "" { + objects = TrimPrefix(objects, prefix) + } + var prefixes []string + var filteredObjects []string + filteredObjects = objects + if strings.TrimSpace(delimiter) != "" { + filteredObjects = HasNoDelimiter(objects, delimiter) + prefixes = HasDelimiter(objects, delimiter) + prefixes = SplitDelimiter(prefixes, delimiter) + prefixes = SortUnique(prefixes) + } + var results []string + var commonPrefixes []string + + for _, commonPrefix := range prefixes { + commonPrefixes = append(commonPrefixes, prefix+commonPrefix) + } + filteredObjects = RemoveDuplicates(filteredObjects) + sort.Strings(filteredObjects) + for _, objectName := range filteredObjects { + if len(results) >= maxkeys { + isTruncated = true + break + } + results = append(results, prefix+objectName) + } + results = RemoveDuplicates(results) + commonPrefixes = RemoveDuplicates(commonPrefixes) + sort.Strings(commonPrefixes) + + listObjects := ListObjectsResults{} + listObjects.Objects = make(map[string]ObjectMetadata) + listObjects.CommonPrefixes = commonPrefixes + listObjects.IsTruncated = isTruncated + + for _, objectName := range results { + objMetadata, err := b.readObjectMetadata(normalizeObjectName(objectName)) + if err != nil { + return ListObjectsResults{}, err.Trace() + } + listObjects.Objects[objectName] = objMetadata + } + return listObjects, nil +} + +// ReadObject - open an object to read +func (b bucket) ReadObject(objectName string) (reader io.ReadCloser, size int64, err *probe.Error) { + b.lock.Lock() + defer b.lock.Unlock() + reader, writer := io.Pipe() + // get list of objects + bucketMetadata, err := b.getBucketMetadata() + if err != nil { + return nil, 0, err.Trace() + } + // check if object exists + if _, ok := bucketMetadata.Buckets[b.getBucketName()].BucketObjects[objectName]; !ok { + return nil, 0, probe.NewError(ObjectNotFound{Object: objectName}) + } + objMetadata, err := b.readObjectMetadata(normalizeObjectName(objectName)) + if err != nil { + return nil, 0, err.Trace() + } + // read and reply back to GetObject() request in a go-routine + go b.readObjectData(normalizeObjectName(objectName), writer, objMetadata) + return reader, objMetadata.Size, nil +} + +// WriteObject - write a new object into bucket +func (b bucket) WriteObject(objectName string, objectData io.Reader, size int64, expectedMD5Sum string, metadata map[string]string, signature *signV4.Signature) (ObjectMetadata, *probe.Error) { + b.lock.Lock() + defer b.lock.Unlock() + if objectName == "" || objectData == nil { + return ObjectMetadata{}, probe.NewError(InvalidArgument{}) + } + writers, err := b.getObjectWriters(normalizeObjectName(objectName), "data") + if err != nil { + return ObjectMetadata{}, err.Trace() + } + sumMD5 := md5.New() + sum512 := sha512.New() + var sum256 hash.Hash + var mwriter io.Writer + + if signature != nil { + sum256 = sha256.New() + mwriter = io.MultiWriter(sumMD5, sum256, sum512) + } else { + mwriter = io.MultiWriter(sumMD5, sum512) + } + objMetadata := ObjectMetadata{} + objMetadata.Version = objectMetadataVersion + objMetadata.Created = time.Now().UTC() + // if total writers are only '1' do not compute erasure + switch len(writers) == 1 { + case true: + mw := io.MultiWriter(writers[0], mwriter) + totalLength, err := io.Copy(mw, objectData) + if err != nil { + 
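// For illustration (the keys are hypothetical), the delimiter handling in
// ListObjects above mirrors S3 semantics: with prefix "photos/" and
// delimiter "/", the keys
//
//	photos/2015/a.jpg, photos/2015/b.jpg, photos/index.html
//
// yield CommonPrefixes ["photos/2015/"] and Objects ["photos/index.html"]:
// TrimPrefix strips "photos/", SplitDelimiter reduces the nested keys to
// "2015/", SortUnique deduplicates them, and HasNoDelimiter keeps only
// "index.html".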
CleanupWritersOnError(writers)
+			return ObjectMetadata{}, probe.NewError(err)
+		}
+		objMetadata.Size = totalLength
+	case false:
+		// calculate data and parity dictated by total number of writers
+		k, m, err := b.getDataAndParity(len(writers))
+		if err != nil {
+			CleanupWritersOnError(writers)
+			return ObjectMetadata{}, err.Trace()
+		}
+		// write encoded data with k, m and writers
+		chunkCount, totalLength, err := b.writeObjectData(k, m, writers, objectData, size, mwriter)
+		if err != nil {
+			CleanupWritersOnError(writers)
+			return ObjectMetadata{}, err.Trace()
+		}
+		/// xlMetadata section
+		objMetadata.BlockSize = blockSize
+		objMetadata.ChunkCount = chunkCount
+		objMetadata.DataDisks = k
+		objMetadata.ParityDisks = m
+		objMetadata.Size = int64(totalLength)
+	}
+	objMetadata.Bucket = b.getBucketName()
+	objMetadata.Object = objectName
+	dataMD5sum := sumMD5.Sum(nil)
+	dataSHA512sum := sum512.Sum(nil)
+	if signature != nil {
+		ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sum256.Sum(nil)))
+		if err != nil {
+			// an error occurred during signature calculation; we return and also clean up any temporary writers.
+			CleanupWritersOnError(writers)
+			return ObjectMetadata{}, err.Trace()
+		}
+		if !ok {
+			// purge all writers when control flow reaches here
+			//
+			// a signature mismatch occurred: all temp files are removed and all data purged.
+			CleanupWritersOnError(writers)
+			return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{})
+		}
+	}
+	objMetadata.MD5Sum = hex.EncodeToString(dataMD5sum)
+	objMetadata.SHA512Sum = hex.EncodeToString(dataSHA512sum)
+
+	// Verify if the written object is equal to what is expected, only if it is requested as such
+	if strings.TrimSpace(expectedMD5Sum) != "" {
+		if err := b.isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), objMetadata.MD5Sum); err != nil {
+			return ObjectMetadata{}, err.Trace()
+		}
+	}
+	objMetadata.Metadata = metadata
+	// write object specific metadata
+	if err := b.writeObjectMetadata(normalizeObjectName(objectName), objMetadata); err != nil {
+		// purge all writers when control flow reaches here
+		CleanupWritersOnError(writers)
+		return ObjectMetadata{}, err.Trace()
+	}
+	// close all writers when control flow reaches here
+	for _, writer := range writers {
+		writer.Close()
+	}
+	return objMetadata, nil
+}
+
+// isMD5SumEqual - returns an error if the md5sum mismatches, otherwise it is `nil`
+func (b bucket) isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error {
+	if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" {
+		expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum)
+		if err != nil {
+			return probe.NewError(err)
+		}
+		actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum)
+		if err != nil {
+			return probe.NewError(err)
+		}
+		if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) {
+			return probe.NewError(BadDigest{})
+		}
+		return nil
+	}
+	return probe.NewError(InvalidArgument{})
+}
+
+// writeObjectMetadata - write additional object metadata
+func (b bucket) writeObjectMetadata(objectName string, objMetadata ObjectMetadata) *probe.Error {
+	if objMetadata.Object == "" {
+		return probe.NewError(InvalidArgument{})
+	}
+	objMetadataWriters, err := b.getObjectWriters(objectName, objectMetadataConfig)
+	if err != nil {
+		return err.Trace()
+	}
+	for _, objMetadataWriter := range objMetadataWriters {
+		jenc := json.NewEncoder(objMetadataWriter)
+		if err := jenc.Encode(&objMetadata); err != nil {
+			// Close writers and purge all temporary entries
+			CleanupWritersOnError(objMetadataWriters)
+			return probe.NewError(err)
+		}
+	}
+	for _, objMetadataWriter := range objMetadataWriters {
+		objMetadataWriter.Close()
+	}
+	return nil
+}
+
+// readObjectMetadata - read object metadata
+func (b bucket) readObjectMetadata(objectName string) (ObjectMetadata, *probe.Error) {
+	if objectName == "" {
+		return ObjectMetadata{}, probe.NewError(InvalidArgument{})
+	}
+	objMetadata := ObjectMetadata{}
+	objMetadataReaders, err := b.getObjectReaders(objectName, objectMetadataConfig)
+	if err != nil {
+		return ObjectMetadata{}, err.Trace()
+	}
+	for _, objMetadataReader := range objMetadataReaders {
+		defer objMetadataReader.Close()
+	}
+	{
+		var err error
+		for _, objMetadataReader := range objMetadataReaders {
+			jdec := json.NewDecoder(objMetadataReader)
+			if err = jdec.Decode(&objMetadata); err == nil {
+				return objMetadata, nil
+			}
+		}
+		return ObjectMetadata{}, probe.NewError(err)
+	}
+}
+
+// TODO - This is a temporary normalization of objectNames, need to find a better way
+//
+// normalizeObjectName - all objectNames with "/" get normalized to a simple objectName
+//
+// example:
+// user provided value - "this/is/my/deep/directory/structure"
+// xl normalized value - "this-is-my-deep-directory-structure"
+//
+func normalizeObjectName(objectName string) string {
+	// replace every '/' with '-'
+	return strings.Replace(objectName, "/", "-", -1)
+}
+
+// getDataAndParity - calculate k, m (data and parity) values from the number of disks
+func (b bucket) getDataAndParity(totalWriters int) (k uint8, m uint8, err *probe.Error) {
+	if totalWriters <= 1 {
+		return 0, 0, probe.NewError(InvalidArgument{})
+	}
+	quotient := totalWriters / 2 // not using float or abs to let the integer round off to the lower value
+	// quotient cannot be bigger than (255 / 2) = 127
+	if quotient > 127 {
+		return 0, 0, probe.NewError(ParityOverflow{})
+	}
+	remainder := totalWriters % 2 // will be 1 for odd and 0 for even numbers
+	k = uint8(quotient + remainder)
+	m = uint8(quotient)
+	return k, m, nil
+}
+
+// writeObjectData -
+func (b bucket) writeObjectData(k, m uint8, writers []io.WriteCloser, objectData io.Reader, size int64, hashWriter io.Writer) (int, int, *probe.Error) {
+	encoder, err := newEncoder(k, m)
+	if err != nil {
+		return 0, 0, err.Trace()
+	}
+	chunkSize := int64(10 * 1024 * 1024)
+	chunkCount := 0
+	totalLength := 0
+
+	var e error
+	for e == nil {
+		var length int
+		inputData := make([]byte, chunkSize)
+		length, e = objectData.Read(inputData)
+		if length != 0 {
+			encodedBlocks, err := encoder.Encode(inputData[0:length])
+			if err != nil {
+				return 0, 0, err.Trace()
+			}
+			if _, err := hashWriter.Write(inputData[0:length]); err != nil {
+				return 0, 0, probe.NewError(err)
+			}
+			for blockIndex, block := range encodedBlocks {
+				errCh := make(chan error, 1)
+				go func(writer io.Writer, reader io.Reader, errCh chan<- error) {
+					defer close(errCh)
+					_, err := io.Copy(writer, reader)
+					errCh <- err
+				}(writers[blockIndex], bytes.NewReader(block), errCh)
+				if err := <-errCh; err != nil {
+					// Returning the error is fine here; CleanupWritersOnError() would clean up the writers
+					return 0, 0, probe.NewError(err)
+				}
+			}
+			totalLength += length
+			chunkCount = chunkCount + 1
+		}
+	}
+	if e != io.EOF {
+		return 0, 0, probe.NewError(e)
+	}
+	return chunkCount, totalLength, nil
+}
+
+// readObjectData -
+func (b bucket) readObjectData(objectName string, writer *io.PipeWriter, objMetadata ObjectMetadata) {
+	readers, err := b.getObjectReaders(objectName, "data")
+	if err != nil {
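// A worked example (editorial illustration) of getDataAndParity above:
// 4 writers give k=2, m=2; 5 writers give quotient 2 and remainder 1, so
// k=3, m=2; 16 writers give k=8, m=8. A single writer is rejected with
// InvalidArgument, and 256 or more writers trip ParityOverflow because the
// quotient exceeds 127.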
+		writer.CloseWithError(probe.WrapError(err))
+		return
+	}
+	for _, reader := range readers {
+		defer reader.Close()
+	}
+	var expected512Sum, expectedMd5sum []byte
+	{
+		var err error
+		expectedMd5sum, err = hex.DecodeString(objMetadata.MD5Sum)
+		if err != nil {
+			writer.CloseWithError(probe.WrapError(probe.NewError(err)))
+			return
+		}
+		expected512Sum, err = hex.DecodeString(objMetadata.SHA512Sum)
+		if err != nil {
+			writer.CloseWithError(probe.WrapError(probe.NewError(err)))
+			return
+		}
+	}
+	hasher := md5.New()
+	sum512hasher := sha512.New() // compute SHA512 so it can be compared against objMetadata.SHA512Sum
+	mwriter := io.MultiWriter(writer, hasher, sum512hasher)
+	switch len(readers) > 1 {
+	case true:
+		encoder, err := newEncoder(objMetadata.DataDisks, objMetadata.ParityDisks)
+		if err != nil {
+			writer.CloseWithError(probe.WrapError(err))
+			return
+		}
+		totalLeft := objMetadata.Size
+		for i := 0; i < objMetadata.ChunkCount; i++ {
+			decodedData, err := b.decodeEncodedData(totalLeft, int64(objMetadata.BlockSize), readers, encoder, writer)
+			if err != nil {
+				writer.CloseWithError(probe.WrapError(err))
+				return
+			}
+			if _, err := io.Copy(mwriter, bytes.NewReader(decodedData)); err != nil {
+				writer.CloseWithError(probe.WrapError(probe.NewError(err)))
+				return
+			}
+			totalLeft = totalLeft - int64(objMetadata.BlockSize)
+		}
+	case false:
+		_, err := io.Copy(writer, readers[0])
+		if err != nil {
+			writer.CloseWithError(probe.WrapError(probe.NewError(err)))
+			return
+		}
+	}
+	// check if the decoded data's md5sum matches
+	if !bytes.Equal(expectedMd5sum, hasher.Sum(nil)) {
+		writer.CloseWithError(probe.WrapError(probe.NewError(ChecksumMismatch{})))
+		return
+	}
+	if !bytes.Equal(expected512Sum, sum512hasher.Sum(nil)) {
+		writer.CloseWithError(probe.WrapError(probe.NewError(ChecksumMismatch{})))
+		return
+	}
+	writer.Close()
+	return
+}
+
+// decodeEncodedData -
+func (b bucket) decodeEncodedData(totalLeft, blockSize int64, readers map[int]io.ReadCloser, encoder encoder, writer *io.PipeWriter) ([]byte, *probe.Error) {
+	var curBlockSize int64
+	if blockSize < totalLeft {
+		curBlockSize = blockSize
+	} else {
+		curBlockSize = totalLeft
+	}
+	curChunkSize, err := encoder.GetEncodedBlockLen(int(curBlockSize))
+	if err != nil {
+		return nil, err.Trace()
+	}
+	encodedBytes := make([][]byte, encoder.k+encoder.m)
+	errCh := make(chan error, len(readers))
+	var errRet error
+	var readCnt int
+
+	for i, reader := range readers {
+		go func(reader io.Reader, i int) {
+			encodedBytes[i] = make([]byte, curChunkSize)
+			_, err := io.ReadFull(reader, encodedBytes[i])
+			if err != nil {
+				encodedBytes[i] = nil
+				errCh <- err
+				return
+			}
+			errCh <- nil
+		}(reader, i)
+		// read through errCh for any errors
+		err := <-errCh
+		if err != nil {
+			errRet = err
+		} else {
+			readCnt++
+		}
+	}
+	if readCnt < int(encoder.k) {
+		return nil, probe.NewError(errRet)
+	}
+	decodedData, err := encoder.Decode(encodedBytes, int(curBlockSize))
+	if err != nil {
+		return nil, err.Trace()
+	}
+	return decodedData, nil
+}
+
+// getObjectReaders -
+func (b bucket) getObjectReaders(objectName, objectMeta string) (map[int]io.ReadCloser, *probe.Error) {
+	readers := make(map[int]io.ReadCloser)
+	var disks map[int]block.Block
+	var err *probe.Error
+	nodeSlice := 0
+	for _, node := range b.nodes {
+		disks, err = node.ListDisks()
+		if err != nil {
+			return nil, err.Trace()
+		}
+		for order, disk := range disks {
+			var objectSlice io.ReadCloser
+			bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
+			objectPath := filepath.Join(b.xlName, bucketSlice, objectName, objectMeta)
+			objectSlice, err = disk.Open(objectPath)
+			if err == nil {
+				readers[order] = objectSlice
+			}
+		}
+		nodeSlice = nodeSlice + 1
+	}
+	if err != nil {
+		return nil, err.Trace()
+	}
+	return readers, nil
+}
+
+// getObjectWriters -
+func (b bucket) getObjectWriters(objectName, objectMeta string) ([]io.WriteCloser, *probe.Error) {
+	var writers []io.WriteCloser
+	nodeSlice := 0
+	for _, node := range b.nodes {
+		disks, err := node.ListDisks()
+		if err != nil {
+			return nil, err.Trace()
+		}
+		writers = make([]io.WriteCloser, len(disks))
+		for order, disk := range disks {
+			bucketSlice := fmt.Sprintf("%s$%d$%d", b.name, nodeSlice, order)
+			objectPath := filepath.Join(b.xlName, bucketSlice, objectName, objectMeta)
+			objectSlice, err := disk.CreateFile(objectPath)
+			if err != nil {
+				return nil, err.Trace()
+			}
+			writers[order] = objectSlice
+		}
+		nodeSlice = nodeSlice + 1
+	}
+	return writers, nil
+}
diff --git a/pkg/xl/cache/data/data.go b/pkg/xl/cache/data/data.go
new file mode 100644
index 000000000..56ccdd14c
--- /dev/null
+++ b/pkg/xl/cache/data/data.go
@@ -0,0 +1,204 @@
+/*
+ * Minio Cloud Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package data implements in memory caching methods for data
+package data
+
+import (
+	"container/list"
+	"sync"
+	"time"
+)
+
+var noExpiration = time.Duration(0)
+
+// Cache holds the required variables to compose an in memory cache system
+// which also provides a maxSize bound with eviction of the oldest entries
+type Cache struct {
+	// Mutex is used for handling the concurrent
+	// read/write requests for cache
+	sync.Mutex
+
+	// items hold the cached objects
+	items *list.List
+
+	// reverseItems maps a cached key to its element in the items list
+	reverseItems map[interface{}]*list.Element
+
+	// maxSize is a total size for overall cache
+	maxSize uint64
+
+	// currentSize is a current size in memory
+	currentSize uint64
+
+	// OnEvicted - callback function for eviction
+	OnEvicted func(a ...interface{})
+
+	// totalEvicted counter to keep track of total expirations
+	totalEvicted int
+}
+
+// Stats current cache statistics
+type Stats struct {
+	Bytes   uint64
+	Items   int
+	Evicted int
+}
+
+type element struct {
+	key   interface{}
+	value []byte
+}
+
+// NewCache creates an inmemory cache
+//
+// maxSize is used for evicting objects before we run out of memory
+func NewCache(maxSize uint64) *Cache {
+	return &Cache{
+		items:        list.New(),
+		reverseItems: make(map[interface{}]*list.Element),
+		maxSize:      maxSize,
+	}
+}
+
+// SetMaxSize sets a new max size
+func (r *Cache) SetMaxSize(maxSize uint64) {
+	r.Lock()
+	defer r.Unlock()
+	r.maxSize = maxSize
+	return
+}
+
+// Stats get current cache statistics
+func (r *Cache) Stats() Stats {
+	return Stats{
+		Bytes:   r.currentSize,
+		Items:   r.items.Len(),
+		Evicted: r.totalEvicted,
+	}
+}
+
+// Get returns the value of a given key if it exists
+func (r *Cache) Get(key interface{}) ([]byte, bool) {
+	r.Lock()
+	defer r.Unlock()
+	ele, hit := r.reverseItems[key]
+	if !hit {
+		return nil, false
+	}
+	r.items.MoveToFront(ele)
+	return ele.Value.(*element).value, true
+}
+
+// Len returns the length of the value of a given key, returns zero if the key doesn't exist
+func (r *Cache) Len(key interface{}) int {
+	r.Lock()
+	defer r.Unlock()
+	_, ok := r.reverseItems[key]
+	if !ok {
+		return 0
+	}
+	return len(r.reverseItems[key].Value.(*element).value)
+}
+
+// Append will append new data to an existing key,
+// if the key doesn't exist it behaves like Set()
+func (r *Cache) Append(key interface{}, value []byte) bool {
+	r.Lock()
+	defer r.Unlock()
+	valueLen := uint64(len(value))
+	if r.maxSize > 0 {
+		// check if the size of the object is not bigger than the
+		// capacity of the cache
+		if valueLen > r.maxSize {
+			return false
+		}
+		// evict the oldest entry if the maxSize threshold would be crossed
+		for (r.currentSize + valueLen) > r.maxSize {
+			r.doDeleteOldest()
+			break
+		}
+	}
+	ele, hit := r.reverseItems[key]
+	if !hit {
+		ele := r.items.PushFront(&element{key, value})
+		r.currentSize += valueLen
+		r.reverseItems[key] = ele
+		return true
+	}
+	r.items.MoveToFront(ele)
+	r.currentSize += valueLen
+	ele.Value.(*element).value = append(ele.Value.(*element).value, value...)
+	return true
+}
+
+// Set will persist a value to the cache
+func (r *Cache) Set(key interface{}, value []byte) bool {
+	r.Lock()
+	defer r.Unlock()
+	valueLen := uint64(len(value))
+	if r.maxSize > 0 {
+		// check if the size of the object is not bigger than the
+		// capacity of the cache
+		if valueLen > r.maxSize {
+			return false
+		}
+		// evict the oldest entries until there is room for the new value
+		for (r.currentSize + valueLen) > r.maxSize {
+			r.doDeleteOldest()
+		}
+	}
+	if _, hit := r.reverseItems[key]; hit {
+		return false
+	}
+	ele := r.items.PushFront(&element{key, value})
+	r.currentSize += valueLen
+	r.reverseItems[key] = ele
+	return true
+}
+
+// Delete deletes a given key if it exists
+func (r *Cache) Delete(key interface{}) {
+	r.Lock()
+	defer r.Unlock()
+	ele, ok := r.reverseItems[key]
+	if !ok {
+		return
+	}
+	if ele != nil {
+		r.currentSize -= uint64(len(r.reverseItems[key].Value.(*element).value))
+		r.items.Remove(ele)
+		delete(r.reverseItems, key)
+		r.totalEvicted++
+		if r.OnEvicted != nil {
+			r.OnEvicted(key)
+		}
+	}
+}
+
+func (r *Cache) doDeleteOldest() {
+	ele := r.items.Back()
+	if ele != nil {
+		r.currentSize -= uint64(len(r.reverseItems[ele.Value.(*element).key].Value.(*element).value))
+		delete(r.reverseItems, ele.Value.(*element).key)
+		r.items.Remove(ele)
+		r.totalEvicted++
+		if r.OnEvicted != nil {
+			r.OnEvicted(ele.Value.(*element).key)
+		}
+	}
+}
diff --git a/pkg/xl/cache/data/data_test.go b/pkg/xl/cache/data/data_test.go
new file mode 100644
index 000000000..71327f69b
--- /dev/null
+++ b/pkg/xl/cache/data/data_test.go
@@ -0,0 +1,45 @@
+/*
+ * Minio Cloud Storage, (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package data
+
+import (
+	"testing"
+
+	. "gopkg.in/check.v1"
"gopkg.in/check.v1" +) + +func Test(t *testing.T) { TestingT(t) } + +type MySuite struct{} + +var _ = Suite(&MySuite{}) + +func (s *MySuite) TestCache(c *C) { + cache := NewCache(1000) + data := []byte("Hello, world!") + ok := cache.Set("filename", data) + + c.Assert(ok, Equals, true) + storedata, ok := cache.Get("filename") + + c.Assert(ok, Equals, true) + c.Assert(data, DeepEquals, storedata) + + cache.Delete("filename") + _, ok = cache.Get("filename") + c.Assert(ok, Equals, false) +} diff --git a/pkg/xl/cache/metadata/metadata.go b/pkg/xl/cache/metadata/metadata.go new file mode 100644 index 000000000..8b2d6cce2 --- /dev/null +++ b/pkg/xl/cache/metadata/metadata.go @@ -0,0 +1,110 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package metadata implements in memory caching methods for metadata information +package metadata + +import ( + "sync" + "time" +) + +var noExpiration = time.Duration(0) + +// Cache holds the required variables to compose an in memory cache system +// which also provides expiring key mechanism and also maxSize +type Cache struct { + // Mutex is used for handling the concurrent + // read/write requests for cache + sync.Mutex + + // items hold the cached objects + items map[string]interface{} + + // updatedAt holds the time that related item's updated at + updatedAt map[string]time.Time +} + +// Stats current cache statistics +type Stats struct { + Items int +} + +// NewCache creates an inmemory cache +// +func NewCache() *Cache { + return &Cache{ + items: make(map[string]interface{}), + updatedAt: map[string]time.Time{}, + } +} + +// Stats get current cache statistics +func (r *Cache) Stats() Stats { + return Stats{ + Items: len(r.items), + } +} + +// GetAll returs all the items +func (r *Cache) GetAll() map[string]interface{} { + r.Lock() + defer r.Unlock() + // copy + items := r.items + return items +} + +// Get returns a value of a given key if it exists +func (r *Cache) Get(key string) interface{} { + r.Lock() + defer r.Unlock() + value, ok := r.items[key] + if !ok { + return nil + } + return value +} + +// Exists returns true if key exists +func (r *Cache) Exists(key string) bool { + r.Lock() + defer r.Unlock() + _, ok := r.items[key] + return ok +} + +// Set will persist a value to the cache +func (r *Cache) Set(key string, value interface{}) bool { + r.Lock() + defer r.Unlock() + r.items[key] = value + return true +} + +// Delete deletes a given key if exists +func (r *Cache) Delete(key string) { + r.Lock() + defer r.Unlock() + r.doDelete(key) +} + +func (r *Cache) doDelete(key string) { + if _, ok := r.items[key]; ok { + delete(r.items, key) + delete(r.updatedAt, key) + } +} diff --git a/pkg/xl/cache/metadata/metadata_test.go b/pkg/xl/cache/metadata/metadata_test.go new file mode 100644 index 000000000..7669ccd28 --- /dev/null +++ b/pkg/xl/cache/metadata/metadata_test.go @@ -0,0 +1,46 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package metadata + +import ( + "testing" + + . "gopkg.in/check.v1" +) + +func Test(t *testing.T) { TestingT(t) } + +type MySuite struct{} + +var _ = Suite(&MySuite{}) + +func (s *MySuite) TestCache(c *C) { + cache := NewCache() + data := []byte("Hello, world!") + ok := cache.Set("filename", data) + + c.Assert(ok, Equals, true) + storedata := cache.Get("filename") + + c.Assert(ok, Equals, true) + c.Assert(data, DeepEquals, storedata) + + cache.Delete("filename") + + ok = cache.Exists("filename") + c.Assert(ok, Equals, false) +} diff --git a/pkg/xl/common.go b/pkg/xl/common.go new file mode 100644 index 000000000..7bda6202a --- /dev/null +++ b/pkg/xl/common.go @@ -0,0 +1,190 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xl + +import ( + "bufio" + "bytes" + "io" + "regexp" + "sort" + "strings" + "unicode/utf8" + + "github.com/minio/minio/pkg/atomic" +) + +// IsValidXL - verify xl name is correct +func IsValidXL(xlName string) bool { + if len(xlName) < 3 || len(xlName) > 63 { + return false + } + if xlName[0] == '.' || xlName[len(xlName)-1] == '.' { + return false + } + if match, _ := regexp.MatchString("\\.\\.", xlName); match == true { + return false + } + // We don't support xlNames with '.' in them + match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", xlName) + return match +} + +// IsValidBucket - verify bucket name in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html +func IsValidBucket(bucket string) bool { + if len(bucket) < 3 || len(bucket) > 63 { + return false + } + if bucket[0] == '.' || bucket[len(bucket)-1] == '.' { + return false + } + if match, _ := regexp.MatchString("\\.\\.", bucket); match == true { + return false + } + // We don't support buckets with '.' 
in them + match, _ := regexp.MatchString("^[a-zA-Z][a-zA-Z0-9\\-]+[a-zA-Z0-9]$", bucket) + return match +} + +// IsValidObjectName - verify object name in accordance with +// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html +func IsValidObjectName(object string) bool { + if strings.TrimSpace(object) == "" { + return false + } + if len(object) > 1024 || len(object) == 0 { + return false + } + if !utf8.ValidString(object) { + return false + } + return true +} + +// IsValidPrefix - verify prefix name is correct, an empty prefix is valid +func IsValidPrefix(prefix string) bool { + if strings.TrimSpace(prefix) == "" { + return true + } + return IsValidObjectName(prefix) +} + +// ProxyWriter implements io.Writer to trap written bytes +type ProxyWriter struct { + writer io.Writer + writtenBytes []byte +} + +func (r *ProxyWriter) Write(p []byte) (n int, err error) { + n, err = r.writer.Write(p) + if err != nil { + return + } + r.writtenBytes = append(r.writtenBytes, p[0:n]...) + return +} + +// NewProxyWriter - wrap around a given writer with ProxyWriter +func NewProxyWriter(w io.Writer) *ProxyWriter { + return &ProxyWriter{writer: w, writtenBytes: nil} +} + +// Delimiter delims the string at delimiter +func Delimiter(object, delimiter string) string { + readBuffer := bytes.NewBufferString(object) + reader := bufio.NewReader(readBuffer) + stringReader := strings.NewReader(delimiter) + delimited, _ := stringReader.ReadByte() + delimitedStr, _ := reader.ReadString(delimited) + return delimitedStr +} + +// RemoveDuplicates removes duplicate elements from a slice +func RemoveDuplicates(slice []string) []string { + newSlice := []string{} + seen := make(map[string]struct{}) + for _, val := range slice { + if _, ok := seen[val]; !ok { + newSlice = append(newSlice, val) + seen[val] = struct{}{} // avoiding byte allocation + } + } + return newSlice +} + +// TrimPrefix trims off a prefix string from all the elements in a given slice +func TrimPrefix(objects []string, prefix string) []string { + var results []string + for _, object := range objects { + results = append(results, strings.TrimPrefix(object, prefix)) + } + return results +} + +// HasNoDelimiter provides a new slice from an input slice which has elements without delimiter +func HasNoDelimiter(objects []string, delim string) []string { + var results []string + for _, object := range objects { + if !strings.Contains(object, delim) { + results = append(results, object) + } + } + return results +} + +// HasDelimiter provides a new slice from an input slice which has elements with a delimiter +func HasDelimiter(objects []string, delim string) []string { + var results []string + for _, object := range objects { + if strings.Contains(object, delim) { + results = append(results, object) + } + } + return results +} + +// SplitDelimiter provides a new slice from an input slice by splitting a delimiter +func SplitDelimiter(objects []string, delim string) []string { + var results []string + for _, object := range objects { + parts := strings.Split(object, delim) + results = append(results, parts[0]+delim) + } + return results +} + +// SortUnique sort a slice in lexical order, removing duplicate elements +func SortUnique(objects []string) []string { + objectMap := make(map[string]string) + for _, v := range objects { + objectMap[v] = v + } + var results []string + for k := range objectMap { + results = append(results, k) + } + sort.Strings(results) + return results +} + +// CleanupWritersOnError purge writers on error +func 
CleanupWritersOnError(writers []io.WriteCloser) { + for _, writer := range writers { + writer.(*atomic.File).CloseAndPurge() + } +} diff --git a/pkg/xl/config.go b/pkg/xl/config.go new file mode 100644 index 000000000..15d05f1b2 --- /dev/null +++ b/pkg/xl/config.go @@ -0,0 +1,80 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xl + +import ( + "os/user" + "path/filepath" + + "github.com/minio/minio/pkg/probe" + "github.com/minio/minio/pkg/quick" +) + +// getXLConfigPath get xl config file path +func getXLConfigPath() (string, *probe.Error) { + if customConfigPath != "" { + return customConfigPath, nil + } + u, err := user.Current() + if err != nil { + return "", probe.NewError(err) + } + xlConfigPath := filepath.Join(u.HomeDir, ".minio", "xl.json") + return xlConfigPath, nil +} + +// internal variable only accessed via get/set methods +var customConfigPath string + +// SetXLConfigPath - set custom xl config path +func SetXLConfigPath(configPath string) { + customConfigPath = configPath +} + +// SaveConfig save xl config +func SaveConfig(a *Config) *probe.Error { + xlConfigPath, err := getXLConfigPath() + if err != nil { + return err.Trace() + } + qc, err := quick.New(a) + if err != nil { + return err.Trace() + } + if err := qc.Save(xlConfigPath); err != nil { + return err.Trace() + } + return nil +} + +// LoadConfig load xl config +func LoadConfig() (*Config, *probe.Error) { + xlConfigPath, err := getXLConfigPath() + if err != nil { + return nil, err.Trace() + } + a := &Config{} + a.Version = "0.0.1" + qc, err := quick.New(a) + if err != nil { + return nil, err.Trace() + } + if err := qc.Load(xlConfigPath); err != nil { + return nil, err.Trace() + } + return qc.Data().(*Config), nil +} diff --git a/pkg/xl/definitions.go b/pkg/xl/definitions.go new file mode 100644 index 000000000..b1474b450 --- /dev/null +++ b/pkg/xl/definitions.go @@ -0,0 +1,157 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xl + +import "time" + +// ObjectMetadata container for object on xl system +type ObjectMetadata struct { + // version + Version string `json:"version"` + + // object metadata + Created time.Time `json:"created"` + Bucket string `json:"bucket"` + Object string `json:"object"` + Size int64 `json:"size"` + + // erasure + DataDisks uint8 `json:"sys.erasureK"` + ParityDisks uint8 `json:"sys.erasureM"` + BlockSize int `json:"sys.blockSize"` + ChunkCount int `json:"sys.chunkCount"` + + // checksums + MD5Sum string `json:"sys.md5sum"` + SHA512Sum string `json:"sys.sha512sum"` + + // metadata + Metadata map[string]string `json:"metadata"` +} + +// Metadata container for xl metadata +type Metadata struct { + Version string `json:"version"` +} + +// AllBuckets container for all buckets +type AllBuckets struct { + Version string `json:"version"` + Buckets map[string]BucketMetadata `json:"buckets"` +} + +// BucketMetadata container for bucket level metadata +type BucketMetadata struct { + Version string `json:"version"` + Name string `json:"name"` + ACL BucketACL `json:"acl"` + Created time.Time `json:"created"` + Multiparts map[string]MultiPartSession `json:"multiparts"` + Metadata map[string]string `json:"metadata"` + BucketObjects map[string]struct{} `json:"objects"` +} + +// ListObjectsResults container for list objects response +type ListObjectsResults struct { + Objects map[string]ObjectMetadata `json:"objects"` + CommonPrefixes []string `json:"commonPrefixes"` + IsTruncated bool `json:"isTruncated"` +} + +// MultiPartSession multipart session +type MultiPartSession struct { + UploadID string `json:"uploadId"` + Initiated time.Time `json:"initiated"` + Parts map[string]PartMetadata `json:"parts"` + TotalParts int `json:"total-parts"` +} + +// PartMetadata - various types of individual part resources +type PartMetadata struct { + PartNumber int + LastModified time.Time + ETag string + Size int64 +} + +// CompletePart - completed part container +type CompletePart struct { + PartNumber int + ETag string +} + +// completedParts is a sortable interface for Part slice +type completedParts []CompletePart + +func (a completedParts) Len() int { return len(a) } +func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } + +// CompleteMultipartUpload container for completing multipart upload +type CompleteMultipartUpload struct { + Part []CompletePart +} + +// ObjectResourcesMetadata - various types of object resources +type ObjectResourcesMetadata struct { + Bucket string + EncodingType string + Key string + UploadID string + StorageClass string + PartNumberMarker int + NextPartNumberMarker int + MaxParts int + IsTruncated bool + + Part []*PartMetadata +} + +// UploadMetadata container capturing metadata on in progress multipart upload in a given bucket +type UploadMetadata struct { + Key string + UploadID string + StorageClass string + Initiated time.Time +} + +// BucketMultipartResourcesMetadata - various types of bucket resources for inprogress multipart uploads +type BucketMultipartResourcesMetadata struct { + KeyMarker string + UploadIDMarker string + NextKeyMarker string + NextUploadIDMarker string + EncodingType string + MaxUploads int + IsTruncated bool + Upload []*UploadMetadata + Prefix string + Delimiter string + CommonPrefixes []string +} + +// BucketResourcesMetadata - various types of bucket resources +type BucketResourcesMetadata struct { + Prefix string + Marker string + 
NextMarker string + Maxkeys int + EncodingType string + Delimiter string + IsTruncated bool + CommonPrefixes []string +} diff --git a/pkg/xl/encoder.go b/pkg/xl/encoder.go new file mode 100644 index 000000000..f82e04031 --- /dev/null +++ b/pkg/xl/encoder.go @@ -0,0 +1,71 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xl + +import ( + encoding "github.com/minio/minio/pkg/erasure" + "github.com/minio/minio/pkg/probe" +) + +// encoder internal struct +type encoder struct { + encoder *encoding.Erasure + k, m uint8 +} + +// newEncoder - instantiate a new encoder +func newEncoder(k, m uint8) (encoder, *probe.Error) { + e := encoder{} + params, err := encoding.ValidateParams(k, m) + if err != nil { + return encoder{}, probe.NewError(err) + } + e.encoder = encoding.NewErasure(params) + e.k = k + e.m = m + return e, nil +} + +// TODO - think again if this is needed +// GetEncodedBlockLen - wrapper around erasure function with the same name +func (e encoder) GetEncodedBlockLen(dataLength int) (int, *probe.Error) { + if dataLength <= 0 { + return 0, probe.NewError(InvalidArgument{}) + } + return encoding.GetEncodedBlockLen(dataLength, e.k), nil +} + +// Encode - erasure code input bytes +func (e encoder) Encode(data []byte) ([][]byte, *probe.Error) { + if data == nil { + return nil, probe.NewError(InvalidArgument{}) + } + encodedData, err := e.encoder.Encode(data) + if err != nil { + return nil, probe.NewError(err) + } + return encodedData, nil +} + +// Decode - erasure decode input encoded bytes +func (e encoder) Decode(encodedData [][]byte, dataLength int) ([]byte, *probe.Error) { + decodedData, err := e.encoder.Decode(encodedData, dataLength) + if err != nil { + return nil, probe.NewError(err) + } + return decodedData, nil +} diff --git a/pkg/xl/errors.go b/pkg/xl/errors.go new file mode 100644 index 000000000..44726873f --- /dev/null +++ b/pkg/xl/errors.go @@ -0,0 +1,333 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package xl
+
+import "fmt"
+
+// InvalidArgument invalid argument
+type InvalidArgument struct{}
+
+func (e InvalidArgument) Error() string {
+	return "Invalid argument"
+}
+
+// UnsupportedFilesystem unsupported filesystem type
+type UnsupportedFilesystem struct {
+	Type string
+}
+
+func (e UnsupportedFilesystem) Error() string {
+	return "Unsupported filesystem: " + e.Type
+}
+
+// BucketNotFound bucket does not exist
+type BucketNotFound struct {
+	Bucket string
+}
+
+func (e BucketNotFound) Error() string {
+	return "Bucket not found: " + e.Bucket
+}
+
+// ObjectExists object exists
+type ObjectExists struct {
+	Object string
+}
+
+func (e ObjectExists) Error() string {
+	return "Object exists: " + e.Object
+}
+
+// ObjectNotFound object does not exist
+type ObjectNotFound struct {
+	Object string
+}
+
+func (e ObjectNotFound) Error() string {
+	return "Object not found: " + e.Object
+}
+
+// ObjectCorrupted object found to be corrupted
+type ObjectCorrupted struct {
+	Object string
+}
+
+func (e ObjectCorrupted) Error() string {
+	return "Object found corrupted: " + e.Object
+}
+
+// BucketExists bucket exists
+type BucketExists struct {
+	Bucket string
+}
+
+func (e BucketExists) Error() string {
+	return "Bucket exists: " + e.Bucket
+}
+
+// CorruptedBackend backend found to be corrupted
+type CorruptedBackend struct {
+	Backend string
+}
+
+func (e CorruptedBackend) Error() string {
+	return "Corrupted backend: " + e.Backend
+}
+
+// NotImplemented function not implemented
+type NotImplemented struct {
+	Function string
+}
+
+func (e NotImplemented) Error() string {
+	return "Not implemented: " + e.Function
+}
+
+// InvalidDisksArgument invalid number of disks per node
+type InvalidDisksArgument struct{}
+
+func (e InvalidDisksArgument) Error() string {
+	return "Invalid number of disks per node"
+}
+
+// BadDigest bad md5sum
+type BadDigest struct{}
+
+func (e BadDigest) Error() string {
+	return "Bad digest"
+}
+
+// ParityOverflow parity overflow
+type ParityOverflow struct{}
+
+func (e ParityOverflow) Error() string {
+	return "Parity overflow"
+}
+
+// ChecksumMismatch checksum mismatch
+type ChecksumMismatch struct{}
+
+func (e ChecksumMismatch) Error() string {
+	return "Checksum mismatch"
+}
+
+// MissingPOSTPolicy missing post policy
+type MissingPOSTPolicy struct{}
+
+func (e MissingPOSTPolicy) Error() string {
+	return "Missing POST policy in multipart form"
+}
+
+// InternalError - generic internal error
+type InternalError struct {
+}
+
+// BackendError - generic disk backend error
+type BackendError struct {
+	Path string
+}
+
+// BackendCorrupted - path has corrupted data
+type BackendCorrupted BackendError
+
+// APINotImplemented - generic API not implemented error
+type APINotImplemented struct {
+	API string
+}
+
+// GenericBucketError - generic bucket error
+type GenericBucketError struct {
+	Bucket string
+}
+
+// GenericObjectError - generic object error
+type GenericObjectError struct {
+	Bucket string
+	Object string
+}
+
+// ImplementationError - generic implementation error
+type ImplementationError struct {
+	Bucket string
+	Object string
+	Err    error
+}
+
+// DigestError - generic md5 error
+type DigestError struct {
+	Bucket string
+	Key    string
+	Md5    string
+}
+
+/// ACL related errors
+
+// InvalidACL - acl invalid
+type InvalidACL struct {
+	ACL string
+}
+
+func (e InvalidACL) Error() string {
+	return "Requested ACL is invalid: " + e.ACL
+}
+
+/// Bucket related errors
+
+// BucketNameInvalid - bucket name provided is invalid
+type BucketNameInvalid GenericBucketError
+
+// TooManyBuckets - total buckets exceeded
+type TooManyBuckets GenericBucketError
+
+/// Object related errors
+
+// EntityTooLarge - object size exceeds maximum limit
+type EntityTooLarge struct {
+	GenericObjectError
+	Size    string
+	MaxSize string
+}
+
+// ObjectNameInvalid - object name provided is invalid
+type ObjectNameInvalid GenericObjectError
+
+// InvalidDigest - md5 in request header invalid
+type InvalidDigest DigestError
+
+// Error returns the formatted error message
+func (e ImplementationError) Error() string {
+	error := ""
+	if e.Bucket != "" {
+		error = error + "Bucket: " + e.Bucket + " "
+	}
+	if e.Object != "" {
+		error = error + "Object: " + e.Object + " "
+	}
+	error = error + "Error: " + e.Err.Error()
+	return error
+}
+
+// EmbedError - wrapper function for error object
+func EmbedError(bucket, object string, err error) ImplementationError {
+	return ImplementationError{
+		Bucket: bucket,
+		Object: object,
+		Err:    err,
+	}
+}
+
+// Error returns the formatted error message
+func (e InternalError) Error() string {
+	return "Internal error occurred"
+}
+
+// Error returns the formatted error message
+func (e APINotImplemented) Error() string {
+	return "API not implemented: " + e.API
+}
+
+// Error returns the formatted error message
+func (e BucketNameInvalid) Error() string {
+	return "Bucket name invalid: " + e.Bucket
+}
+
+// Error returns the formatted error message
+func (e TooManyBuckets) Error() string {
+	return "Bucket limit of 100 exceeded, cannot create bucket: " + e.Bucket
+}
+
+// Error returns the formatted error message
+func (e ObjectNameInvalid) Error() string {
+	return "Object name invalid: " + e.Bucket + "#" + e.Object
+}
+
+// Error returns the formatted error message
+func (e EntityTooLarge) Error() string {
+	return e.Bucket + "#" + e.Object + " with " + e.Size + " reached maximum allowed size limit " + e.MaxSize
+}
+
+// IncompleteBody You did not provide the number of bytes specified by the Content-Length HTTP header
+type IncompleteBody GenericObjectError
+
+// Error returns the formatted error message
+func (e IncompleteBody) Error() string {
+	return e.Bucket + "#" + e.Object + " has incomplete body"
+}
+
+// Error returns the formatted error message
+func (e BackendCorrupted) Error() string {
+	return "Backend corrupted: " + e.Path
+}
+
+// Error returns the formatted error message
+func (e InvalidDigest) Error() string {
+	return "Md5 provided " + e.Md5 + " is invalid"
+}
+
+// OperationNotPermitted - operation not permitted
+type OperationNotPermitted struct {
+	Op     string
+	Reason string
+}
+
+func (e OperationNotPermitted) Error() string {
+	return "Operation " + e.Op + " not permitted for reason: " + e.Reason
+}
+
+// InvalidRange - invalid range
+type InvalidRange struct {
+	Start  int64
+	Length int64
+}
+
+func (e InvalidRange) Error() string {
+	return fmt.Sprintf("Invalid range start:%d length:%d", e.Start, e.Length)
+}
+
+/// Multipart related errors
+
+// InvalidUploadID invalid upload id
+type InvalidUploadID struct {
+	UploadID string
+}
+
+func (e InvalidUploadID) Error() string {
+	return "Invalid upload id " + e.UploadID
+}
+
+// InvalidPart One or more of the specified parts could not be found
+type InvalidPart struct{}
+
+func (e InvalidPart) Error() string {
+	return "One or more of the specified parts could not be found"
+}
+
+// InvalidPartOrder parts are not ordered as requested
+type
InvalidPartOrder struct { + UploadID string +} + +func (e InvalidPartOrder) Error() string { + return "Invalid part order sent for " + e.UploadID +} + +// MalformedXML invalid xml format +type MalformedXML struct{} + +func (e MalformedXML) Error() string { + return "Malformed XML" +} diff --git a/pkg/xl/heal.go b/pkg/xl/heal.go new file mode 100644 index 000000000..6af30ffd1 --- /dev/null +++ b/pkg/xl/heal.go @@ -0,0 +1,69 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xl + +import ( + "encoding/json" + "fmt" + "path/filepath" + + "github.com/minio/minio/pkg/probe" + "github.com/minio/minio/pkg/xl/block" +) + +// healBuckets heal bucket slices +func (xl API) healBuckets() *probe.Error { + if err := xl.listXLBuckets(); err != nil { + return err.Trace() + } + bucketMetadata, err := xl.getXLBucketMetadata() + if err != nil { + return err.Trace() + } + disks := make(map[int]block.Block) + for _, node := range xl.nodes { + nDisks, err := node.ListDisks() + if err != nil { + return err.Trace() + } + for k, v := range nDisks { + disks[k] = v + } + } + for order, disk := range disks { + if disk.IsUsable() { + disk.MakeDir(xl.config.XLName) + bucketMetadataWriter, err := disk.CreateFile(filepath.Join(xl.config.XLName, bucketMetadataConfig)) + if err != nil { + return err.Trace() + } + defer bucketMetadataWriter.Close() + jenc := json.NewEncoder(bucketMetadataWriter) + if err := jenc.Encode(bucketMetadata); err != nil { + return probe.NewError(err) + } + for bucket := range bucketMetadata.Buckets { + bucketSlice := fmt.Sprintf("%s$0$%d", bucket, order) // TODO handle node slices + err := disk.MakeDir(filepath.Join(xl.config.XLName, bucketSlice)) + if err != nil { + return err.Trace() + } + } + } + } + return nil +} diff --git a/pkg/xl/interfaces.go b/pkg/xl/interfaces.go new file mode 100644 index 000000000..ad16642d7 --- /dev/null +++ b/pkg/xl/interfaces.go @@ -0,0 +1,72 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xl + +import ( + "io" + + "github.com/minio/minio/pkg/probe" + signV4 "github.com/minio/minio/pkg/signature" +) + +// Collection of XL specification interfaces + +// Interface is a collection of cloud storage and management interface +type Interface interface { + CloudStorage + Management +} + +// CloudStorage is a xl cloud storage interface +type CloudStorage interface { + // Storage service operations + GetBucketMetadata(bucket string) (BucketMetadata, *probe.Error) + SetBucketMetadata(bucket string, metadata map[string]string) *probe.Error + ListBuckets() ([]BucketMetadata, *probe.Error) + MakeBucket(bucket string, ACL string, location io.Reader, signature *signV4.Signature) *probe.Error + + // Bucket operations + ListObjects(string, BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) + + // Object operations + GetObject(w io.Writer, bucket, object string, start, length int64) (int64, *probe.Error) + GetObjectMetadata(bucket, object string) (ObjectMetadata, *probe.Error) + // bucket, object, expectedMD5Sum, size, reader, metadata, signature + CreateObject(string, string, string, int64, io.Reader, map[string]string, *signV4.Signature) (ObjectMetadata, *probe.Error) + + Multipart +} + +// Multipart API +type Multipart interface { + NewMultipartUpload(bucket, key, contentType string) (string, *probe.Error) + AbortMultipartUpload(bucket, key, uploadID string) *probe.Error + CreateObjectPart(string, string, string, int, string, string, int64, io.Reader, *signV4.Signature) (string, *probe.Error) + CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *signV4.Signature) (ObjectMetadata, *probe.Error) + ListMultipartUploads(string, BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error) + ListObjectParts(string, string, ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) +} + +// Management is a xl management system interface +type Management interface { + Heal() *probe.Error + Rebalance() *probe.Error + Info() (map[string][]string, *probe.Error) + + AttachNode(hostname string, disks []string) *probe.Error + DetachNode(hostname string) *probe.Error +} diff --git a/pkg/xl/management.go b/pkg/xl/management.go new file mode 100644 index 000000000..3f89c95d0 --- /dev/null +++ b/pkg/xl/management.go @@ -0,0 +1,81 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xl + +import ( + "github.com/minio/minio/pkg/probe" + "github.com/minio/minio/pkg/xl/block" +) + +// Info - return info about xl configuration +func (xl API) Info() (nodeDiskMap map[string][]string, err *probe.Error) { + nodeDiskMap = make(map[string][]string) + for nodeName, n := range xl.nodes { + disks, err := n.ListDisks() + if err != nil { + return nil, err.Trace() + } + diskList := make([]string, len(disks)) + for diskOrder, disk := range disks { + diskList[diskOrder] = disk.GetPath() + } + nodeDiskMap[nodeName] = diskList + } + return nodeDiskMap, nil +} + +// AttachNode - attach node +func (xl API) AttachNode(hostname string, disks []string) *probe.Error { + if hostname == "" || len(disks) == 0 { + return probe.NewError(InvalidArgument{}) + } + n, err := newNode(hostname) + if err != nil { + return err.Trace() + } + xl.nodes[hostname] = n + for i, d := range disks { + newDisk, err := block.New(d) + if err != nil { + continue + } + if err := newDisk.MakeDir(xl.config.XLName); err != nil { + return err.Trace() + } + if err := n.AttachDisk(newDisk, i); err != nil { + return err.Trace() + } + } + return nil +} + +// DetachNode - detach node +func (xl API) DetachNode(hostname string) *probe.Error { + delete(xl.nodes, hostname) + return nil +} + +// Rebalance - rebalance an existing xl with new disks and nodes +func (xl API) Rebalance() *probe.Error { + return probe.NewError(APINotImplemented{API: "management.Rebalance"}) +} + +// Heal - heal your xls +func (xl API) Heal() *probe.Error { + // TODO handle data heal + return xl.healBuckets() +} diff --git a/pkg/xl/multipart.go b/pkg/xl/multipart.go new file mode 100644 index 000000000..0588c0816 --- /dev/null +++ b/pkg/xl/multipart.go @@ -0,0 +1,514 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xl + +import ( + "bytes" + "crypto/md5" + "crypto/sha512" + "encoding/base64" + "encoding/hex" + "encoding/xml" + + "io" + "io/ioutil" + "math/rand" + "runtime/debug" + "sort" + "strconv" + "strings" + "time" + + "github.com/minio/minio/pkg/crypto/sha256" + "github.com/minio/minio/pkg/probe" + signV4 "github.com/minio/minio/pkg/signature" + "github.com/minio/minio/pkg/xl/cache/data" +) + +/// V2 API functions + +// NewMultipartUpload - initiate a new multipart session +func (xl API) NewMultipartUpload(bucket, key, contentType string) (string, *probe.Error) { + xl.lock.Lock() + defer xl.lock.Unlock() + + if !IsValidBucket(bucket) { + return "", probe.NewError(BucketNameInvalid{Bucket: bucket}) + } + if !IsValidObjectName(key) { + return "", probe.NewError(ObjectNameInvalid{Object: key}) + } + // if len(xl.config.NodeDiskMap) > 0 { + // return xl.newMultipartUpload(bucket, key, contentType) + // } + if !xl.storedBuckets.Exists(bucket) { + return "", probe.NewError(BucketNotFound{Bucket: bucket}) + } + storedBucket := xl.storedBuckets.Get(bucket).(storedBucket) + objectKey := bucket + "/" + key + if _, ok := storedBucket.objectMetadata[objectKey]; ok == true { + return "", probe.NewError(ObjectExists{Object: key}) + } + id := []byte(strconv.Itoa(rand.Int()) + bucket + key + time.Now().UTC().String()) + uploadIDSum := sha512.Sum512(id) + uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47] + + storedBucket.multiPartSession[key] = MultiPartSession{ + UploadID: uploadID, + Initiated: time.Now().UTC(), + TotalParts: 0, + } + storedBucket.partMetadata[key] = make(map[int]PartMetadata) + multiPartCache := data.NewCache(0) + multiPartCache.OnEvicted = xl.evictedPart + xl.multiPartObjects[uploadID] = multiPartCache + xl.storedBuckets.Set(bucket, storedBucket) + return uploadID, nil +} + +// AbortMultipartUpload - abort an incomplete multipart session +func (xl API) AbortMultipartUpload(bucket, key, uploadID string) *probe.Error { + xl.lock.Lock() + defer xl.lock.Unlock() + + if !IsValidBucket(bucket) { + return probe.NewError(BucketNameInvalid{Bucket: bucket}) + } + if !IsValidObjectName(key) { + return probe.NewError(ObjectNameInvalid{Object: key}) + } + // TODO: multipart support for xl is broken, since we haven't finalized the format in which + // it can be stored, disabling this for now until we get the underlying layout stable. 
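+	//
+	// For reference only: the uploadID validated below is the one minted by
+	// NewMultipartUpload above. A minimal sketch of the same stdlib calls
+	// used there (nothing here is new API; values are illustrative):
+	//
+	//	id := []byte(strconv.Itoa(rand.Int()) + bucket + key + time.Now().UTC().String())
+	//	sum := sha512.Sum512(id)
+	//	uploadID := base64.URLEncoding.EncodeToString(sum[:])[:47] // 47-char URL-safe ID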
+ // + // if len(xl.config.NodeDiskMap) > 0 { + // return xl.abortMultipartUpload(bucket, key, uploadID) + // } + if !xl.storedBuckets.Exists(bucket) { + return probe.NewError(BucketNotFound{Bucket: bucket}) + } + storedBucket := xl.storedBuckets.Get(bucket).(storedBucket) + if storedBucket.multiPartSession[key].UploadID != uploadID { + return probe.NewError(InvalidUploadID{UploadID: uploadID}) + } + xl.cleanupMultipartSession(bucket, key, uploadID) + return nil +} + +// CreateObjectPart - create a part in a multipart session +func (xl API) CreateObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signV4.Signature) (string, *probe.Error) { + xl.lock.Lock() + etag, err := xl.createObjectPart(bucket, key, uploadID, partID, "", expectedMD5Sum, size, data, signature) + xl.lock.Unlock() + // possible free + debug.FreeOSMemory() + + return etag, err.Trace() +} + +// createObject - internal wrapper function called by CreateObjectPart +func (xl API) createObjectPart(bucket, key, uploadID string, partID int, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signV4.Signature) (string, *probe.Error) { + if !IsValidBucket(bucket) { + return "", probe.NewError(BucketNameInvalid{Bucket: bucket}) + } + if !IsValidObjectName(key) { + return "", probe.NewError(ObjectNameInvalid{Object: key}) + } + // TODO: multipart support for xl is broken, since we haven't finalized the format in which + // it can be stored, disabling this for now until we get the underlying layout stable. + // + /* + if len(xl.config.NodeDiskMap) > 0 { + metadata := make(map[string]string) + if contentType == "" { + contentType = "application/octet-stream" + } + contentType = strings.TrimSpace(contentType) + metadata["contentType"] = contentType + if strings.TrimSpace(expectedMD5Sum) != "" { + expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) + if err != nil { + // pro-actively close the connection + return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum}) + } + expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) + } + partMetadata, err := xl.putObjectPart(bucket, key, expectedMD5Sum, uploadID, partID, data, size, metadata, signature) + if err != nil { + return "", err.Trace() + } + return partMetadata.ETag, nil + } + */ + if !xl.storedBuckets.Exists(bucket) { + return "", probe.NewError(BucketNotFound{Bucket: bucket}) + } + strBucket := xl.storedBuckets.Get(bucket).(storedBucket) + // Verify upload id + if strBucket.multiPartSession[key].UploadID != uploadID { + return "", probe.NewError(InvalidUploadID{UploadID: uploadID}) + } + + // get object key + parts := strBucket.partMetadata[key] + if _, ok := parts[partID]; ok { + return parts[partID].ETag, nil + } + + if contentType == "" { + contentType = "application/octet-stream" + } + contentType = strings.TrimSpace(contentType) + if strings.TrimSpace(expectedMD5Sum) != "" { + expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) + if err != nil { + // pro-actively close the connection + return "", probe.NewError(InvalidDigest{Md5: expectedMD5Sum}) + } + expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) + } + + // calculate md5 + hash := md5.New() + sha256hash := sha256.New() + + var totalLength int64 + var err error + for err == nil { + var length int + byteBuffer := make([]byte, 1024*1024) + length, err = data.Read(byteBuffer) // do not read error return error here, we will handle this 
error later + if length != 0 { + hash.Write(byteBuffer[0:length]) + sha256hash.Write(byteBuffer[0:length]) + ok := xl.multiPartObjects[uploadID].Append(partID, byteBuffer[0:length]) + if !ok { + return "", probe.NewError(InternalError{}) + } + totalLength += int64(length) + go debug.FreeOSMemory() + } + } + if totalLength != size { + xl.multiPartObjects[uploadID].Delete(partID) + return "", probe.NewError(IncompleteBody{Bucket: bucket, Object: key}) + } + if err != io.EOF { + return "", probe.NewError(err) + } + + md5SumBytes := hash.Sum(nil) + md5Sum := hex.EncodeToString(md5SumBytes) + // Verify if the written object is equal to what is expected, only if it is requested as such + if strings.TrimSpace(expectedMD5Sum) != "" { + if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil { + return "", err.Trace() + } + } + + if signature != nil { + { + ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil))) + if err != nil { + return "", err.Trace() + } + if !ok { + return "", probe.NewError(signV4.SigDoesNotMatch{}) + } + } + } + + newPart := PartMetadata{ + PartNumber: partID, + LastModified: time.Now().UTC(), + ETag: md5Sum, + Size: totalLength, + } + + parts[partID] = newPart + strBucket.partMetadata[key] = parts + multiPartSession := strBucket.multiPartSession[key] + multiPartSession.TotalParts++ + strBucket.multiPartSession[key] = multiPartSession + xl.storedBuckets.Set(bucket, strBucket) + return md5Sum, nil +} + +// cleanupMultipartSession invoked during an abort or complete multipart session to cleanup session from memory +func (xl API) cleanupMultipartSession(bucket, key, uploadID string) { + storedBucket := xl.storedBuckets.Get(bucket).(storedBucket) + for i := 1; i <= storedBucket.multiPartSession[key].TotalParts; i++ { + xl.multiPartObjects[uploadID].Delete(i) + } + delete(storedBucket.multiPartSession, key) + delete(storedBucket.partMetadata, key) + xl.storedBuckets.Set(bucket, storedBucket) +} + +func (xl API) mergeMultipart(parts *CompleteMultipartUpload, uploadID string, fullObjectWriter *io.PipeWriter) { + for _, part := range parts.Part { + recvMD5 := part.ETag + object, ok := xl.multiPartObjects[uploadID].Get(part.PartNumber) + if ok == false { + fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(InvalidPart{}))) + return + } + calcMD5Bytes := md5.Sum(object) + // complete multi part request header md5sum per part is hex encoded + recvMD5Bytes, err := hex.DecodeString(strings.Trim(recvMD5, "\"")) + if err != nil { + fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(InvalidDigest{Md5: recvMD5}))) + return + } + if !bytes.Equal(recvMD5Bytes, calcMD5Bytes[:]) { + fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(BadDigest{}))) + return + } + + if _, err := io.Copy(fullObjectWriter, bytes.NewReader(object)); err != nil { + fullObjectWriter.CloseWithError(probe.WrapError(probe.NewError(err))) + return + } + object = nil + } + fullObjectWriter.Close() + return +} + +// CompleteMultipartUpload - complete a multipart upload and persist the data +func (xl API) CompleteMultipartUpload(bucket, key, uploadID string, data io.Reader, signature *signV4.Signature) (ObjectMetadata, *probe.Error) { + xl.lock.Lock() + defer xl.lock.Unlock() + size := int64(xl.multiPartObjects[uploadID].Stats().Bytes) + fullObjectReader, err := xl.completeMultipartUploadV2(bucket, key, uploadID, data, signature) + if err != nil { + return ObjectMetadata{}, err.Trace() + } + objectMetadata, err := xl.createObject(bucket, key, 
"", "", size, fullObjectReader, nil) + if err != nil { + // No need to call internal cleanup functions here, caller should call AbortMultipartUpload() + // which would in-turn cleanup properly in accordance with S3 Spec + return ObjectMetadata{}, err.Trace() + } + xl.cleanupMultipartSession(bucket, key, uploadID) + return objectMetadata, nil +} + +func (xl API) completeMultipartUploadV2(bucket, key, uploadID string, data io.Reader, signature *signV4.Signature) (io.Reader, *probe.Error) { + if !IsValidBucket(bucket) { + return nil, probe.NewError(BucketNameInvalid{Bucket: bucket}) + } + if !IsValidObjectName(key) { + return nil, probe.NewError(ObjectNameInvalid{Object: key}) + } + + // TODO: multipart support for xl is broken, since we haven't finalized the format in which + // it can be stored, disabling this for now until we get the underlying layout stable. + // + // if len(xl.config.NodeDiskMap) > 0 { + // xl.lock.Unlock() + // return xl.completeMultipartUpload(bucket, key, uploadID, data, signature) + // } + + if !xl.storedBuckets.Exists(bucket) { + return nil, probe.NewError(BucketNotFound{Bucket: bucket}) + } + storedBucket := xl.storedBuckets.Get(bucket).(storedBucket) + // Verify upload id + if storedBucket.multiPartSession[key].UploadID != uploadID { + return nil, probe.NewError(InvalidUploadID{UploadID: uploadID}) + } + partBytes, err := ioutil.ReadAll(data) + if err != nil { + return nil, probe.NewError(err) + } + if signature != nil { + partHashBytes := sha256.Sum256(partBytes) + ok, err := signature.DoesSignatureMatch(hex.EncodeToString(partHashBytes[:])) + if err != nil { + return nil, err.Trace() + } + if !ok { + return nil, probe.NewError(signV4.SigDoesNotMatch{}) + } + } + parts := &CompleteMultipartUpload{} + if err := xml.Unmarshal(partBytes, parts); err != nil { + return nil, probe.NewError(MalformedXML{}) + } + if !sort.IsSorted(completedParts(parts.Part)) { + return nil, probe.NewError(InvalidPartOrder{}) + } + + fullObjectReader, fullObjectWriter := io.Pipe() + go xl.mergeMultipart(parts, uploadID, fullObjectWriter) + + return fullObjectReader, nil +} + +// byKey is a sortable interface for UploadMetadata slice +type byKey []*UploadMetadata + +func (a byKey) Len() int { return len(a) } +func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byKey) Less(i, j int) bool { return a[i].Key < a[j].Key } + +// ListMultipartUploads - list incomplete multipart sessions for a given bucket +func (xl API) ListMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error) { + // TODO handle delimiter, low priority + xl.lock.Lock() + defer xl.lock.Unlock() + + if !IsValidBucket(bucket) { + return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) + } + + // TODO: multipart support for xl is broken, since we haven't finalized the format in which + // it can be stored, disabling this for now until we get the underlying layout stable. 
+ // + // if len(xl.config.NodeDiskMap) > 0 { + // return xl.listMultipartUploads(bucket, resources) + // } + + if !xl.storedBuckets.Exists(bucket) { + return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) + } + + storedBucket := xl.storedBuckets.Get(bucket).(storedBucket) + var uploads []*UploadMetadata + + for key, session := range storedBucket.multiPartSession { + if strings.HasPrefix(key, resources.Prefix) { + if len(uploads) > resources.MaxUploads { + sort.Sort(byKey(uploads)) + resources.Upload = uploads + resources.NextKeyMarker = key + resources.NextUploadIDMarker = session.UploadID + resources.IsTruncated = true + return resources, nil + } + // uploadIDMarker is ignored if KeyMarker is empty + switch { + case resources.KeyMarker != "" && resources.UploadIDMarker == "": + if key > resources.KeyMarker { + upload := new(UploadMetadata) + upload.Key = key + upload.UploadID = session.UploadID + upload.Initiated = session.Initiated + uploads = append(uploads, upload) + } + case resources.KeyMarker != "" && resources.UploadIDMarker != "": + if session.UploadID > resources.UploadIDMarker { + if key >= resources.KeyMarker { + upload := new(UploadMetadata) + upload.Key = key + upload.UploadID = session.UploadID + upload.Initiated = session.Initiated + uploads = append(uploads, upload) + } + } + default: + upload := new(UploadMetadata) + upload.Key = key + upload.UploadID = session.UploadID + upload.Initiated = session.Initiated + uploads = append(uploads, upload) + } + } + } + sort.Sort(byKey(uploads)) + resources.Upload = uploads + return resources, nil +} + +// partNumber is a sortable interface for Part slice +type partNumber []*PartMetadata + +func (a partNumber) Len() int { return len(a) } +func (a partNumber) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a partNumber) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber } + +// ListObjectParts - list parts from incomplete multipart session for a given object +func (xl API) ListObjectParts(bucket, key string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) { + // Verify upload id + xl.lock.Lock() + defer xl.lock.Unlock() + + if !IsValidBucket(bucket) { + return ObjectResourcesMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) + } + if !IsValidObjectName(key) { + return ObjectResourcesMetadata{}, probe.NewError(ObjectNameInvalid{Object: key}) + } + + // TODO: multipart support for xl is broken, since we haven't finalized the format in which + // it can be stored, disabling this for now until we get the underlying layout stable. 
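+	//
+	// Marker semantics of the in-memory listing below, for reference:
+	//  - empty KeyMarker: every session matching Prefix is eligible
+	//  - KeyMarker only: keys strictly greater than KeyMarker
+	//  - KeyMarker + UploadIDMarker: upload IDs greater than UploadIDMarker,
+	//    restricted to keys >= KeyMarker
+	// Once more than MaxUploads sessions accumulate, IsTruncated is set and
+	// NextKeyMarker/NextUploadIDMarker record where the listing stopped.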
+ // + // if len(xl.config.NodeDiskMap) > 0 { + // return xl.listObjectParts(bucket, key, resources) + // } + + if !xl.storedBuckets.Exists(bucket) { + return ObjectResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) + } + storedBucket := xl.storedBuckets.Get(bucket).(storedBucket) + if _, ok := storedBucket.multiPartSession[key]; ok == false { + return ObjectResourcesMetadata{}, probe.NewError(ObjectNotFound{Object: key}) + } + if storedBucket.multiPartSession[key].UploadID != resources.UploadID { + return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID}) + } + storedParts := storedBucket.partMetadata[key] + objectResourcesMetadata := resources + objectResourcesMetadata.Bucket = bucket + objectResourcesMetadata.Key = key + var parts []*PartMetadata + var startPartNumber int + switch { + case objectResourcesMetadata.PartNumberMarker == 0: + startPartNumber = 1 + default: + startPartNumber = objectResourcesMetadata.PartNumberMarker + } + for i := startPartNumber; i <= storedBucket.multiPartSession[key].TotalParts; i++ { + if len(parts) > objectResourcesMetadata.MaxParts { + sort.Sort(partNumber(parts)) + objectResourcesMetadata.IsTruncated = true + objectResourcesMetadata.Part = parts + objectResourcesMetadata.NextPartNumberMarker = i + return objectResourcesMetadata, nil + } + part, ok := storedParts[i] + if !ok { + return ObjectResourcesMetadata{}, probe.NewError(InvalidPart{}) + } + parts = append(parts, &part) + } + sort.Sort(partNumber(parts)) + objectResourcesMetadata.Part = parts + return objectResourcesMetadata, nil +} + +// evictedPart - call back function called by caching module during individual cache evictions +func (xl API) evictedPart(a ...interface{}) { + // loop through all buckets + buckets := xl.storedBuckets.GetAll() + for bucketName, bucket := range buckets { + b := bucket.(storedBucket) + xl.storedBuckets.Set(bucketName, b) + } + debug.FreeOSMemory() +} diff --git a/pkg/xl/node.go b/pkg/xl/node.go new file mode 100644 index 000000000..a029a15e7 --- /dev/null +++ b/pkg/xl/node.go @@ -0,0 +1,76 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xl + +import ( + "github.com/minio/minio/pkg/probe" + "github.com/minio/minio/pkg/xl/block" +) + +// node struct internal +type node struct { + hostname string + disks map[int]block.Block +} + +// newNode - instantiates a new node +func newNode(hostname string) (node, *probe.Error) { + if hostname == "" { + return node{}, probe.NewError(InvalidArgument{}) + } + disks := make(map[int]block.Block) + n := node{ + hostname: hostname, + disks: disks, + } + return n, nil +} + +// GetHostname - return hostname +func (n node) GetHostname() string { + return n.hostname +} + +// ListDisks - return number of disks +func (n node) ListDisks() (map[int]block.Block, *probe.Error) { + return n.disks, nil +} + +// AttachDisk - attach a disk +func (n node) AttachDisk(disk block.Block, diskOrder int) *probe.Error { + if diskOrder < 0 { + return probe.NewError(InvalidArgument{}) + } + n.disks[diskOrder] = disk + return nil +} + +// DetachDisk - detach a disk +func (n node) DetachDisk(diskOrder int) *probe.Error { + delete(n.disks, diskOrder) + return nil +} + +// SaveConfig - save node configuration +func (n node) SaveConfig() *probe.Error { + return probe.NewError(NotImplemented{Function: "SaveConfig"}) +} + +// LoadConfig - load node configuration from saved configs +func (n node) LoadConfig() *probe.Error { + return probe.NewError(NotImplemented{Function: "LoadConfig"}) +} diff --git a/pkg/xl/xl-metadata.md b/pkg/xl/xl-metadata.md new file mode 100644 index 000000000..1825741c5 --- /dev/null +++ b/pkg/xl/xl-metadata.md @@ -0,0 +1,55 @@ +##### Users Collection + +```js + +"minio": { + "version": 1, + "users": [{ + "secretAccessKey": String, + "accessKeyId": String, + "status": String // enum: ok, disabled, deleted + }], + "hosts": [{ + "address": String, + "uuid": String, + "status": String, // enum: ok, disabled, deleted, busy, offline. + "disks": [{ + "disk": String, + "uuid": String, + "status": String // ok, offline, disabled, busy. + }] + }] +} +``` + +##### Bucket Collection + +```js +"buckets": { + "bucket": String, // index + "deleted": Boolean, + "permissions": String +} +``` + +##### Object Collection + +```js +"objects": { + "key": String, // index + "createdAt": Date, + "hosts[16]": [{ + "host": String, + "disk": String, + }], + "deleted": Boolean +} +``` + +```js +"meta": { + "key": String, // index + "type": String // content-type + // type speific meta +} +``` diff --git a/pkg/xl/xl-v1.go b/pkg/xl/xl-v1.go new file mode 100644 index 000000000..14cbfeaaa --- /dev/null +++ b/pkg/xl/xl-v1.go @@ -0,0 +1,681 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xl + +import ( + "encoding/base64" + "encoding/hex" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "time" + + "github.com/minio/minio/pkg/crypto/sha256" + "github.com/minio/minio/pkg/crypto/sha512" + "github.com/minio/minio/pkg/probe" + signV4 "github.com/minio/minio/pkg/signature" + "github.com/minio/minio/pkg/xl/block" +) + +// config files used inside XL +const ( + // bucket, object metadata + bucketMetadataConfig = "bucketMetadata.json" + objectMetadataConfig = "objectMetadata.json" + + // versions + objectMetadataVersion = "1.0.0" + bucketMetadataVersion = "1.0.0" +) + +/// v1 API functions + +// makeBucket - make a new bucket +func (xl API) makeBucket(bucket string, acl BucketACL) *probe.Error { + if bucket == "" || strings.TrimSpace(bucket) == "" { + return probe.NewError(InvalidArgument{}) + } + return xl.makeXLBucket(bucket, acl.String()) +} + +// getBucketMetadata - get bucket metadata +func (xl API) getBucketMetadata(bucketName string) (BucketMetadata, *probe.Error) { + if err := xl.listXLBuckets(); err != nil { + return BucketMetadata{}, err.Trace() + } + if _, ok := xl.buckets[bucketName]; !ok { + return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucketName}) + } + metadata, err := xl.getXLBucketMetadata() + if err != nil { + return BucketMetadata{}, err.Trace() + } + return metadata.Buckets[bucketName], nil +} + +// setBucketMetadata - set bucket metadata +func (xl API) setBucketMetadata(bucketName string, bucketMetadata map[string]string) *probe.Error { + if err := xl.listXLBuckets(); err != nil { + return err.Trace() + } + metadata, err := xl.getXLBucketMetadata() + if err != nil { + return err.Trace() + } + oldBucketMetadata := metadata.Buckets[bucketName] + acl, ok := bucketMetadata["acl"] + if !ok { + return probe.NewError(InvalidArgument{}) + } + oldBucketMetadata.ACL = BucketACL(acl) + metadata.Buckets[bucketName] = oldBucketMetadata + return xl.setXLBucketMetadata(metadata) +} + +// listBuckets - return list of buckets +func (xl API) listBuckets() (map[string]BucketMetadata, *probe.Error) { + if err := xl.listXLBuckets(); err != nil { + return nil, err.Trace() + } + metadata, err := xl.getXLBucketMetadata() + if err != nil { + // intentionally left out the error when XL is empty + // but we need to revisit this area in future - since we need + // to figure out between acceptable and unacceptable errors + return make(map[string]BucketMetadata), nil + } + if metadata == nil { + return make(map[string]BucketMetadata), nil + } + return metadata.Buckets, nil +} + +// listObjects - return list of objects +func (xl API) listObjects(bucket, prefix, marker, delimiter string, maxkeys int) (ListObjectsResults, *probe.Error) { + if err := xl.listXLBuckets(); err != nil { + return ListObjectsResults{}, err.Trace() + } + if _, ok := xl.buckets[bucket]; !ok { + return ListObjectsResults{}, probe.NewError(BucketNotFound{Bucket: bucket}) + } + listObjects, err := xl.buckets[bucket].ListObjects(prefix, marker, delimiter, maxkeys) + if err != nil { + return ListObjectsResults{}, err.Trace() + } + return listObjects, nil +} + +// putObject - put object +func (xl API) putObject(bucket, object, expectedMD5Sum string, reader io.Reader, size int64, metadata map[string]string, signature *signV4.Signature) (ObjectMetadata, *probe.Error) { + if bucket == "" || strings.TrimSpace(bucket) == "" { + return ObjectMetadata{}, probe.NewError(InvalidArgument{}) + } + if object == "" 
|| strings.TrimSpace(object) == "" { + return ObjectMetadata{}, probe.NewError(InvalidArgument{}) + } + if err := xl.listXLBuckets(); err != nil { + return ObjectMetadata{}, err.Trace() + } + if _, ok := xl.buckets[bucket]; !ok { + return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) + } + bucketMeta, err := xl.getXLBucketMetadata() + if err != nil { + return ObjectMetadata{}, err.Trace() + } + if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok { + return ObjectMetadata{}, probe.NewError(ObjectExists{Object: object}) + } + objMetadata, err := xl.buckets[bucket].WriteObject(object, reader, size, expectedMD5Sum, metadata, signature) + if err != nil { + return ObjectMetadata{}, err.Trace() + } + bucketMeta.Buckets[bucket].BucketObjects[object] = struct{}{} + if err := xl.setXLBucketMetadata(bucketMeta); err != nil { + return ObjectMetadata{}, err.Trace() + } + return objMetadata, nil +} + +// putObject - put object +func (xl API) putObjectPart(bucket, object, expectedMD5Sum, uploadID string, partID int, reader io.Reader, size int64, metadata map[string]string, signature *signV4.Signature) (PartMetadata, *probe.Error) { + if bucket == "" || strings.TrimSpace(bucket) == "" { + return PartMetadata{}, probe.NewError(InvalidArgument{}) + } + if object == "" || strings.TrimSpace(object) == "" { + return PartMetadata{}, probe.NewError(InvalidArgument{}) + } + if err := xl.listXLBuckets(); err != nil { + return PartMetadata{}, err.Trace() + } + if _, ok := xl.buckets[bucket]; !ok { + return PartMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) + } + bucketMeta, err := xl.getXLBucketMetadata() + if err != nil { + return PartMetadata{}, err.Trace() + } + if _, ok := bucketMeta.Buckets[bucket].Multiparts[object]; !ok { + return PartMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID}) + } + if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; ok { + return PartMetadata{}, probe.NewError(ObjectExists{Object: object}) + } + objectPart := object + "/" + "multipart" + "/" + strconv.Itoa(partID) + objmetadata, err := xl.buckets[bucket].WriteObject(objectPart, reader, size, expectedMD5Sum, metadata, signature) + if err != nil { + return PartMetadata{}, err.Trace() + } + partMetadata := PartMetadata{ + PartNumber: partID, + LastModified: objmetadata.Created, + ETag: objmetadata.MD5Sum, + Size: objmetadata.Size, + } + multipartSession := bucketMeta.Buckets[bucket].Multiparts[object] + multipartSession.Parts[strconv.Itoa(partID)] = partMetadata + bucketMeta.Buckets[bucket].Multiparts[object] = multipartSession + if err := xl.setXLBucketMetadata(bucketMeta); err != nil { + return PartMetadata{}, err.Trace() + } + return partMetadata, nil +} + +// getObject - get object +func (xl API) getObject(bucket, object string) (reader io.ReadCloser, size int64, err *probe.Error) { + if bucket == "" || strings.TrimSpace(bucket) == "" { + return nil, 0, probe.NewError(InvalidArgument{}) + } + if object == "" || strings.TrimSpace(object) == "" { + return nil, 0, probe.NewError(InvalidArgument{}) + } + if err := xl.listXLBuckets(); err != nil { + return nil, 0, err.Trace() + } + if _, ok := xl.buckets[bucket]; !ok { + return nil, 0, probe.NewError(BucketNotFound{Bucket: bucket}) + } + return xl.buckets[bucket].ReadObject(object) +} + +// getObjectMetadata - get object metadata +func (xl API) getObjectMetadata(bucket, object string) (ObjectMetadata, *probe.Error) { + if err := xl.listXLBuckets(); err != nil { + return ObjectMetadata{}, err.Trace() + } + if _, ok := 
xl.buckets[bucket]; !ok { + return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) + } + bucketMeta, err := xl.getXLBucketMetadata() + if err != nil { + return ObjectMetadata{}, err.Trace() + } + if _, ok := bucketMeta.Buckets[bucket].BucketObjects[object]; !ok { + return ObjectMetadata{}, probe.NewError(ObjectNotFound{Object: object}) + } + objectMetadata, err := xl.buckets[bucket].GetObjectMetadata(object) + if err != nil { + return ObjectMetadata{}, err.Trace() + } + return objectMetadata, nil +} + +// newMultipartUpload - new multipart upload request +func (xl API) newMultipartUpload(bucket, object, contentType string) (string, *probe.Error) { + if err := xl.listXLBuckets(); err != nil { + return "", err.Trace() + } + if _, ok := xl.buckets[bucket]; !ok { + return "", probe.NewError(BucketNotFound{Bucket: bucket}) + } + allbuckets, err := xl.getXLBucketMetadata() + if err != nil { + return "", err.Trace() + } + bucketMetadata := allbuckets.Buckets[bucket] + multiparts := make(map[string]MultiPartSession) + if len(bucketMetadata.Multiparts) > 0 { + multiparts = bucketMetadata.Multiparts + } + + id := []byte(strconv.Itoa(rand.Int()) + bucket + object + time.Now().String()) + uploadIDSum := sha512.Sum512(id) + uploadID := base64.URLEncoding.EncodeToString(uploadIDSum[:])[:47] + + multipartSession := MultiPartSession{ + UploadID: uploadID, + Initiated: time.Now().UTC(), + Parts: make(map[string]PartMetadata), + TotalParts: 0, + } + multiparts[object] = multipartSession + bucketMetadata.Multiparts = multiparts + allbuckets.Buckets[bucket] = bucketMetadata + + if err := xl.setXLBucketMetadata(allbuckets); err != nil { + return "", err.Trace() + } + + return uploadID, nil +} + +// listObjectParts - list all object parts +func (xl API) listObjectParts(bucket, object string, resources ObjectResourcesMetadata) (ObjectResourcesMetadata, *probe.Error) { + if bucket == "" || strings.TrimSpace(bucket) == "" { + return ObjectResourcesMetadata{}, probe.NewError(InvalidArgument{}) + } + if object == "" || strings.TrimSpace(object) == "" { + return ObjectResourcesMetadata{}, probe.NewError(InvalidArgument{}) + } + if err := xl.listXLBuckets(); err != nil { + return ObjectResourcesMetadata{}, err.Trace() + } + if _, ok := xl.buckets[bucket]; !ok { + return ObjectResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) + } + allBuckets, err := xl.getXLBucketMetadata() + if err != nil { + return ObjectResourcesMetadata{}, err.Trace() + } + bucketMetadata := allBuckets.Buckets[bucket] + if _, ok := bucketMetadata.Multiparts[object]; !ok { + return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID}) + } + if bucketMetadata.Multiparts[object].UploadID != resources.UploadID { + return ObjectResourcesMetadata{}, probe.NewError(InvalidUploadID{UploadID: resources.UploadID}) + } + objectResourcesMetadata := resources + objectResourcesMetadata.Bucket = bucket + objectResourcesMetadata.Key = object + var parts []*PartMetadata + startPartNumber := 1 + if objectResourcesMetadata.PartNumberMarker != 0 { + startPartNumber = objectResourcesMetadata.PartNumberMarker + } + for i := startPartNumber; i <= bucketMetadata.Multiparts[object].TotalParts; i++ { + if len(parts) > objectResourcesMetadata.MaxParts { + sort.Sort(partNumber(parts)) + objectResourcesMetadata.IsTruncated = true + objectResourcesMetadata.Part = parts + objectResourcesMetadata.NextPartNumberMarker = i + return objectResourcesMetadata, nil + } + 
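// parts are keyed by their stringified part number (set in putObjectPart), a missing entry means that part was never uploaded for this session + 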
part, ok := bucketMetadata.Multiparts[object].Parts[strconv.Itoa(i)] + if !ok { + return ObjectResourcesMetadata{}, probe.NewError(InvalidPart{}) + } + parts = append(parts, &part) + } + sort.Sort(partNumber(parts)) + objectResourcesMetadata.Part = parts + return objectResourcesMetadata, nil +} + +// completeMultipartUpload - complete an incomplete multipart upload +func (xl API) completeMultipartUpload(bucket, object, uploadID string, data io.Reader, signature *signV4.Signature) (ObjectMetadata, *probe.Error) { + if bucket == "" || strings.TrimSpace(bucket) == "" { + return ObjectMetadata{}, probe.NewError(InvalidArgument{}) + } + if object == "" || strings.TrimSpace(object) == "" { + return ObjectMetadata{}, probe.NewError(InvalidArgument{}) + } + if err := xl.listXLBuckets(); err != nil { + return ObjectMetadata{}, err.Trace() + } + if _, ok := xl.buckets[bucket]; !ok { + return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) + } + allBuckets, err := xl.getXLBucketMetadata() + if err != nil { + return ObjectMetadata{}, err.Trace() + } + bucketMetadata := allBuckets.Buckets[bucket] + if _, ok := bucketMetadata.Multiparts[object]; !ok { + return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID}) + } + if bucketMetadata.Multiparts[object].UploadID != uploadID { + return ObjectMetadata{}, probe.NewError(InvalidUploadID{UploadID: uploadID}) + } + var partBytes []byte + { + var err error + partBytes, err = ioutil.ReadAll(data) + if err != nil { + return ObjectMetadata{}, probe.NewError(err) + } + } + if signature != nil { + partHashBytes := sha256.Sum256(partBytes) + ok, err := signature.DoesSignatureMatch(hex.EncodeToString(partHashBytes[:])) + if err != nil { + return ObjectMetadata{}, err.Trace() + } + if !ok { + return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{}) + } + } + parts := &CompleteMultipartUpload{} + if err := xml.Unmarshal(partBytes, parts); err != nil { + return ObjectMetadata{}, probe.NewError(MalformedXML{}) + } + if !sort.IsSorted(completedParts(parts.Part)) { + return ObjectMetadata{}, probe.NewError(InvalidPartOrder{}) + } + for _, part := range parts.Part { + if strings.Trim(part.ETag, "\"") != bucketMetadata.Multiparts[object].Parts[strconv.Itoa(part.PartNumber)].ETag { + return ObjectMetadata{}, probe.NewError(InvalidPart{}) + } + } + var finalETagBytes []byte + var finalSize int64 + totalParts := strconv.Itoa(bucketMetadata.Multiparts[object].TotalParts) + // concatenate part ETags in part number order, map iteration order is not deterministic + var partNumbers []int + for partNumberStr := range bucketMetadata.Multiparts[object].Parts { + partNum, err := strconv.Atoi(partNumberStr) + if err != nil { + return ObjectMetadata{}, probe.NewError(err) + } + partNumbers = append(partNumbers, partNum) + } + sort.Ints(partNumbers) + for _, partNum := range partNumbers { + part := bucketMetadata.Multiparts[object].Parts[strconv.Itoa(partNum)] + partETagBytes, err := hex.DecodeString(part.ETag) + if err != nil { + return ObjectMetadata{}, probe.NewError(err) + } + finalETagBytes = append(finalETagBytes, partETagBytes...) 
+ finalSize += part.Size + } + finalETag := hex.EncodeToString(finalETagBytes) + objMetadata := ObjectMetadata{} + objMetadata.MD5Sum = finalETag + "-" + totalParts + objMetadata.Object = object + objMetadata.Bucket = bucket + objMetadata.Size = finalSize + objMetadata.Created = bucketMetadata.Multiparts[object].Parts[totalParts].LastModified + return objMetadata, nil +} + +// listMultipartUploads - list all multipart uploads +func (xl API) listMultipartUploads(bucket string, resources BucketMultipartResourcesMetadata) (BucketMultipartResourcesMetadata, *probe.Error) { + if err := xl.listXLBuckets(); err != nil { + return BucketMultipartResourcesMetadata{}, err.Trace() + } + if _, ok := xl.buckets[bucket]; !ok { + return BucketMultipartResourcesMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) + } + allbuckets, err := xl.getXLBucketMetadata() + if err != nil { + return BucketMultipartResourcesMetadata{}, err.Trace() + } + bucketMetadata := allbuckets.Buckets[bucket] + var uploads []*UploadMetadata + for key, session := range bucketMetadata.Multiparts { + if strings.HasPrefix(key, resources.Prefix) { + if len(uploads) > resources.MaxUploads { + sort.Sort(byKey(uploads)) + resources.Upload = uploads + resources.NextKeyMarker = key + resources.NextUploadIDMarker = session.UploadID + resources.IsTruncated = true + return resources, nil + } + // uploadIDMarker is ignored if KeyMarker is empty + switch { + case resources.KeyMarker != "" && resources.UploadIDMarker == "": + if key > resources.KeyMarker { + upload := new(UploadMetadata) + upload.Key = key + upload.UploadID = session.UploadID + upload.Initiated = session.Initiated + uploads = append(uploads, upload) + } + case resources.KeyMarker != "" && resources.UploadIDMarker != "": + if session.UploadID > resources.UploadIDMarker { + if key >= resources.KeyMarker { + upload := new(UploadMetadata) + upload.Key = key + upload.UploadID = session.UploadID + upload.Initiated = session.Initiated + uploads = append(uploads, upload) + } + } + default: + upload := new(UploadMetadata) + upload.Key = key + upload.UploadID = session.UploadID + upload.Initiated = session.Initiated + uploads = append(uploads, upload) + } + } + } + sort.Sort(byKey(uploads)) + resources.Upload = uploads + return resources, nil +} + +// abortMultipartUpload - abort an incomplete multipart upload +func (xl API) abortMultipartUpload(bucket, object, uploadID string) *probe.Error { + if err := xl.listXLBuckets(); err != nil { + return err.Trace() + } + if _, ok := xl.buckets[bucket]; !ok { + return probe.NewError(BucketNotFound{Bucket: bucket}) + } + allbuckets, err := xl.getXLBucketMetadata() + if err != nil { + return err.Trace() + } + bucketMetadata := allbuckets.Buckets[bucket] + if _, ok := bucketMetadata.Multiparts[object]; !ok { + return probe.NewError(InvalidUploadID{UploadID: uploadID}) + } + if bucketMetadata.Multiparts[object].UploadID != uploadID { + return probe.NewError(InvalidUploadID{UploadID: uploadID}) + } + delete(bucketMetadata.Multiparts, object) + + allbuckets.Buckets[bucket] = bucketMetadata + if err := xl.setXLBucketMetadata(allbuckets); err != nil { + return err.Trace() + } + + return nil +} + +//// internal functions + +// getBucketMetadataWriters - collect bucket metadata writers for every disk on every node +func (xl API) getBucketMetadataWriters() ([]io.WriteCloser, *probe.Error) { + var writers []io.WriteCloser + for _, node := range xl.nodes { + disks, err := node.ListDisks() + if err != nil { + return nil, err.Trace() + } + for _, disk := range disks { 
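+ // one writer per disk, setXLBucketMetadata fans the same bucket metadata JSON out to each of them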
+ bucketMetaDataWriter, err := disk.CreateFile(filepath.Join(xl.config.XLName, bucketMetadataConfig)) + if err != nil { + return nil, err.Trace() + } + writers = append(writers, bucketMetaDataWriter) + } + } + return writers, nil +} + +// getBucketMetadataReaders - readers are returned in a map keyed by disk order rather than a slice +func (xl API) getBucketMetadataReaders() (map[int]io.ReadCloser, *probe.Error) { + readers := make(map[int]io.ReadCloser) + disks := make(map[int]block.Block) + var err *probe.Error + for _, node := range xl.nodes { + var nDisks map[int]block.Block + nDisks, err = node.ListDisks() + if err != nil { + return nil, err.Trace() + } + for k, v := range nDisks { + disks[k] = v + } + } + var bucketMetaDataReader io.ReadCloser + for order, disk := range disks { + bucketMetaDataReader, err = disk.Open(filepath.Join(xl.config.XLName, bucketMetadataConfig)) + if err != nil { + continue + } + readers[order] = bucketMetaDataReader + } + if err != nil { + return nil, err.Trace() + } + return readers, nil +} + +// setXLBucketMetadata - write bucket metadata to all disks +func (xl API) setXLBucketMetadata(metadata *AllBuckets) *probe.Error { + writers, err := xl.getBucketMetadataWriters() + if err != nil { + return err.Trace() + } + for _, writer := range writers { + jenc := json.NewEncoder(writer) + if err := jenc.Encode(metadata); err != nil { + CleanupWritersOnError(writers) + return probe.NewError(err) + } + } + for _, writer := range writers { + writer.Close() + } + return nil +} + +// getXLBucketMetadata - read bucket metadata from the first disk holding a decodable copy +func (xl API) getXLBucketMetadata() (*AllBuckets, *probe.Error) { + metadata := &AllBuckets{} + readers, err := xl.getBucketMetadataReaders() + if err != nil { + return nil, err.Trace() + } + for _, reader := range readers { + defer reader.Close() + } + { + var err error + for _, reader := range readers { + jdec := json.NewDecoder(reader) + if err = jdec.Decode(metadata); err == nil { + return metadata, nil + } + } + return nil, probe.NewError(err) + } +} + +// makeXLBucket - create a new bucket and its per-disk directory layout +func (xl API) makeXLBucket(bucketName, acl string) *probe.Error { + if err := xl.listXLBuckets(); err != nil { + return err.Trace() + } + if _, ok := xl.buckets[bucketName]; ok { + return probe.NewError(BucketExists{Bucket: bucketName}) + } + bkt, bucketMetadata, err := newBucket(bucketName, acl, xl.config.XLName, xl.nodes) + if err != nil { + return err.Trace() + } + nodeNumber := 0 + xl.buckets[bucketName] = bkt + for _, node := range xl.nodes { + disks, err := node.ListDisks() + if err != nil { + return err.Trace() + } + for order, disk := range disks { + bucketSlice := fmt.Sprintf("%s$%d$%d", bucketName, nodeNumber, order) + err := disk.MakeDir(filepath.Join(xl.config.XLName, bucketSlice)) + if err != nil { + return err.Trace() + } + } + nodeNumber++ + } + var metadata *AllBuckets + metadata, err = xl.getXLBucketMetadata() + if err != nil { + if os.IsNotExist(err.ToGoError()) { + metadata = new(AllBuckets) + metadata.Buckets = make(map[string]BucketMetadata) + metadata.Buckets[bucketName] = bucketMetadata + err = xl.setXLBucketMetadata(metadata) + if err != nil { + return err.Trace() + } + return nil + } + return err.Trace() + } + metadata.Buckets[bucketName] = bucketMetadata + err = xl.setXLBucketMetadata(metadata) + if err != nil { + return err.Trace() + } + return nil +} + +// listXLBuckets - populate the in-memory bucket map from the on-disk bucket directories +func (xl API) listXLBuckets() *probe.Error { + disks := make(map[int]block.Block) + var err *probe.Error + for _, node := range xl.nodes { + var nDisks map[int]block.Block + nDisks, err = node.ListDisks() + if err != nil { + return err.Trace() + } + for k, v := range nDisks { + disks[k] = v + } 
+ } + var dirs []os.FileInfo + for _, disk := range disks { + dirs, err = disk.ListDir(xl.config.XLName) + if err == nil { + break + } + } + // if all disks are missing then return error + if err != nil { + return err.Trace() + } + for _, dir := range dirs { + splitDir := strings.Split(dir.Name(), "$") + if len(splitDir) < 3 { + return probe.NewError(CorruptedBackend{Backend: dir.Name()}) + } + bucketName := splitDir[0] + // we don't need this once we cache from makeXLBucket() + bkt, _, err := newBucket(bucketName, "private", xl.config.XLName, xl.nodes) + if err != nil { + return err.Trace() + } + xl.buckets[bucketName] = bkt + } + return nil +} diff --git a/pkg/xl/xl-v1_test.go b/pkg/xl/xl-v1_test.go new file mode 100644 index 000000000..0a91a315a --- /dev/null +++ b/pkg/xl/xl-v1_test.go @@ -0,0 +1,290 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xl + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/hex" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "testing" + + . "gopkg.in/check.v1" +) + +func TestXL(t *testing.T) { TestingT(t) } + +type MyXLSuite struct { + root string +} + +var _ = Suite(&MyXLSuite{}) + +// create a dummy TestNodeDiskMap +func createTestNodeDiskMap(p string) map[string][]string { + nodes := make(map[string][]string) + nodes["localhost"] = make([]string, 16) + for i := 0; i < len(nodes["localhost"]); i++ { + diskPath := filepath.Join(p, strconv.Itoa(i)) + if _, err := os.Stat(diskPath); err != nil { + if os.IsNotExist(err) { + os.MkdirAll(diskPath, 0700) + } + } + nodes["localhost"][i] = diskPath + } + return nodes +} + +var dd Interface + +func (s *MyXLSuite) SetUpSuite(c *C) { + root, err := ioutil.TempDir(os.TempDir(), "xl-") + c.Assert(err, IsNil) + s.root = root + + conf := new(Config) + conf.Version = "0.0.1" + conf.XLName = "test" + conf.NodeDiskMap = createTestNodeDiskMap(root) + conf.MaxSize = 100000 + SetXLConfigPath(filepath.Join(root, "xl.json")) + perr := SaveConfig(conf) + c.Assert(perr, IsNil) + + dd, perr = New() + c.Assert(perr, IsNil) + + // testing empty xl + buckets, perr := dd.ListBuckets() + c.Assert(perr, IsNil) + c.Assert(len(buckets), Equals, 0) +} + +func (s *MyXLSuite) TearDownSuite(c *C) { + os.RemoveAll(s.root) +} + +// test make bucket without name +func (s *MyXLSuite) TestBucketWithoutNameFails(c *C) { + // fail to create new bucket without a name + err := dd.MakeBucket("", "private", nil, nil) + c.Assert(err, Not(IsNil)) + + err = dd.MakeBucket(" ", "private", nil, nil) + c.Assert(err, Not(IsNil)) +} + +// test empty bucket +func (s *MyXLSuite) TestEmptyBucket(c *C) { + c.Assert(dd.MakeBucket("foo1", "private", nil, nil), IsNil) + // check if bucket is empty + var resources BucketResourcesMetadata + resources.Maxkeys = 1 + objectsMetadata, resources, err := dd.ListObjects("foo1", resources) + c.Assert(err, IsNil) + c.Assert(len(objectsMetadata), Equals, 0) + c.Assert(resources.CommonPrefixes, DeepEquals, []string{}) + 
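// an empty bucket must not report a truncated listing + 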
c.Assert(resources.IsTruncated, Equals, false) +} + +// test bucket list +func (s *MyXLSuite) TestMakeBucketAndList(c *C) { + // create bucket + err := dd.MakeBucket("foo2", "private", nil, nil) + c.Assert(err, IsNil) + + // check bucket exists + buckets, err := dd.ListBuckets() + c.Assert(err, IsNil) + c.Assert(len(buckets), Equals, 5) + c.Assert(buckets[0].ACL, Equals, BucketACL("private")) +} + +// test re-create bucket +func (s *MyXLSuite) TestMakeBucketWithSameNameFails(c *C) { + err := dd.MakeBucket("foo3", "private", nil, nil) + c.Assert(err, IsNil) + + err = dd.MakeBucket("foo3", "private", nil, nil) + c.Assert(err, Not(IsNil)) +} + +// test make multiple buckets +func (s *MyXLSuite) TestCreateMultipleBucketsAndList(c *C) { + // add a second bucket + err := dd.MakeBucket("foo4", "private", nil, nil) + c.Assert(err, IsNil) + + err = dd.MakeBucket("bar1", "private", nil, nil) + c.Assert(err, IsNil) + + buckets, err := dd.ListBuckets() + c.Assert(err, IsNil) + + c.Assert(len(buckets), Equals, 2) + c.Assert(buckets[0].Name, Equals, "bar1") + c.Assert(buckets[1].Name, Equals, "foo4") + + err = dd.MakeBucket("foobar1", "private", nil, nil) + c.Assert(err, IsNil) + + buckets, err = dd.ListBuckets() + c.Assert(err, IsNil) + + c.Assert(len(buckets), Equals, 3) + c.Assert(buckets[2].Name, Equals, "foobar1") +} + +// test object create without bucket +func (s *MyXLSuite) TestNewObjectFailsWithoutBucket(c *C) { + _, err := dd.CreateObject("unknown", "obj", "", 0, nil, nil, nil) + c.Assert(err, Not(IsNil)) +} + +// test create object metadata +func (s *MyXLSuite) TestNewObjectMetadata(c *C) { + data := "Hello World" + hasher := md5.New() + hasher.Write([]byte(data)) + expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) + reader := ioutil.NopCloser(bytes.NewReader([]byte(data))) + + err := dd.MakeBucket("foo6", "private", nil, nil) + c.Assert(err, IsNil) + + objectMetadata, err := dd.CreateObject("foo6", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/json"}, nil) + c.Assert(err, IsNil) + c.Assert(objectMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil))) + c.Assert(objectMetadata.Metadata["contentType"], Equals, "application/json") +} + +// test create object fails without name +func (s *MyXLSuite) TestNewObjectFailsWithEmptyName(c *C) { + _, err := dd.CreateObject("foo", "", "", 0, nil, nil, nil) + c.Assert(err, Not(IsNil)) +} + +// test create object +func (s *MyXLSuite) TestNewObjectCanBeWritten(c *C) { + err := dd.MakeBucket("foo", "private", nil, nil) + c.Assert(err, IsNil) + + data := "Hello World" + + hasher := md5.New() + hasher.Write([]byte(data)) + expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) + reader := ioutil.NopCloser(bytes.NewReader([]byte(data))) + + actualMetadata, err := dd.CreateObject("foo", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/octet-stream"}, nil) + c.Assert(err, IsNil) + c.Assert(actualMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil))) + + var buffer bytes.Buffer + size, err := dd.GetObject(&buffer, "foo", "obj", 0, 0) + c.Assert(err, IsNil) + c.Assert(size, Equals, int64(len(data))) + c.Assert(buffer.Bytes(), DeepEquals, []byte(data)) + + actualMetadata, err = dd.GetObjectMetadata("foo", "obj") + c.Assert(err, IsNil) + c.Assert(hex.EncodeToString(hasher.Sum(nil)), Equals, actualMetadata.MD5Sum) + c.Assert(int64(len(data)), Equals, actualMetadata.Size) +} + +// test list objects +func (s *MyXLSuite) 
TestMultipleNewObjects(c *C) { + c.Assert(dd.MakeBucket("foo5", "private", nil, nil), IsNil) + + one := ioutil.NopCloser(bytes.NewReader([]byte("one"))) + + _, err := dd.CreateObject("foo5", "obj1", "", int64(len("one")), one, nil, nil) + c.Assert(err, IsNil) + + two := ioutil.NopCloser(bytes.NewReader([]byte("two"))) + _, err = dd.CreateObject("foo5", "obj2", "", int64(len("two")), two, nil, nil) + c.Assert(err, IsNil) + + var buffer1 bytes.Buffer + size, err := dd.GetObject(&buffer1, "foo5", "obj1", 0, 0) + c.Assert(err, IsNil) + c.Assert(size, Equals, int64(len([]byte("one")))) + c.Assert(buffer1.Bytes(), DeepEquals, []byte("one")) + + var buffer2 bytes.Buffer + size, err = dd.GetObject(&buffer2, "foo5", "obj2", 0, 0) + c.Assert(err, IsNil) + c.Assert(size, Equals, int64(len([]byte("two")))) + + c.Assert(buffer2.Bytes(), DeepEquals, []byte("two")) + + /// test list of objects + + // test list objects with prefix and delimiter + var resources BucketResourcesMetadata + resources.Prefix = "o" + resources.Delimiter = "1" + resources.Maxkeys = 10 + objectsMetadata, resources, err := dd.ListObjects("foo5", resources) + c.Assert(err, IsNil) + c.Assert(resources.IsTruncated, Equals, false) + c.Assert(resources.CommonPrefixes[0], Equals, "obj1") + + // test list objects with only delimiter + resources.Prefix = "" + resources.Delimiter = "1" + resources.Maxkeys = 10 + objectsMetadata, resources, err = dd.ListObjects("foo5", resources) + c.Assert(err, IsNil) + c.Assert(objectsMetadata[0].Object, Equals, "obj2") + c.Assert(resources.IsTruncated, Equals, false) + c.Assert(resources.CommonPrefixes[0], Equals, "obj1") + + // test list objects with only prefix + resources.Prefix = "o" + resources.Delimiter = "" + resources.Maxkeys = 10 + objectsMetadata, resources, err = dd.ListObjects("foo5", resources) + c.Assert(err, IsNil) + c.Assert(resources.IsTruncated, Equals, false) + c.Assert(objectsMetadata[0].Object, Equals, "obj1") + c.Assert(objectsMetadata[1].Object, Equals, "obj2") + + three := ioutil.NopCloser(bytes.NewReader([]byte("three"))) + _, err = dd.CreateObject("foo5", "obj3", "", int64(len("three")), three, nil, nil) + c.Assert(err, IsNil) + + var buffer bytes.Buffer + size, err = dd.GetObject(&buffer, "foo5", "obj3", 0, 0) + c.Assert(err, IsNil) + c.Assert(size, Equals, int64(len([]byte("three")))) + c.Assert(buffer.Bytes(), DeepEquals, []byte("three")) + + // test list objects with maxkeys + resources.Prefix = "o" + resources.Delimiter = "" + resources.Maxkeys = 2 + objectsMetadata, resources, err = dd.ListObjects("foo5", resources) + c.Assert(err, IsNil) + c.Assert(resources.IsTruncated, Equals, true) + c.Assert(len(objectsMetadata), Equals, 2) +} diff --git a/pkg/xl/xl-v2.go b/pkg/xl/xl-v2.go new file mode 100644 index 000000000..9d832f798 --- /dev/null +++ b/pkg/xl/xl-v2.go @@ -0,0 +1,637 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package xl + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/hex" + "io" + "io/ioutil" + "log" + "runtime/debug" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/minio/minio/pkg/crypto/sha256" + "github.com/minio/minio/pkg/probe" + "github.com/minio/minio/pkg/quick" + signV4 "github.com/minio/minio/pkg/signature" + "github.com/minio/minio/pkg/xl/cache/data" + "github.com/minio/minio/pkg/xl/cache/metadata" +) + +// total number of buckets allowed +const ( + totalBuckets = 100 +) + +// Config - xl config +type Config struct { + Version string `json:"version"` + MaxSize uint64 `json:"max-size"` + XLName string `json:"xl-name"` + NodeDiskMap map[string][]string `json:"node-disk-map"` +} + +// API - local variables +type API struct { + config *Config + lock *sync.Mutex + objects *data.Cache + multiPartObjects map[string]*data.Cache + storedBuckets *metadata.Cache + nodes map[string]node + buckets map[string]bucket +} + +// storedBucket - saved bucket +type storedBucket struct { + bucketMetadata BucketMetadata + objectMetadata map[string]ObjectMetadata + partMetadata map[string]map[int]PartMetadata + multiPartSession map[string]MultiPartSession +} + +// New - instantiate a new xl +func New() (Interface, *probe.Error) { + var conf *Config + var err *probe.Error + conf, err = LoadConfig() + if err != nil { + conf = &Config{ + Version: "0.0.1", + MaxSize: 512000000, + NodeDiskMap: nil, + XLName: "", + } + if err := quick.CheckData(conf); err != nil { + return nil, err.Trace() + } + } + a := API{config: conf} + a.storedBuckets = metadata.NewCache() + a.nodes = make(map[string]node) + a.buckets = make(map[string]bucket) + a.objects = data.NewCache(a.config.MaxSize) + a.multiPartObjects = make(map[string]*data.Cache) + a.objects.OnEvicted = a.evictedObject + a.lock = new(sync.Mutex) + + if len(a.config.NodeDiskMap) > 0 { + for k, v := range a.config.NodeDiskMap { + if len(v) == 0 { + return nil, probe.NewError(InvalidDisksArgument{}) + } + err := a.AttachNode(k, v) + if err != nil { + return nil, err.Trace() + } + } + /// Initialization, populate all buckets into memory + buckets, err := a.listBuckets() + if err != nil { + return nil, err.Trace() + } + for k, v := range buckets { + var newBucket = storedBucket{} + newBucket.bucketMetadata = v + newBucket.objectMetadata = make(map[string]ObjectMetadata) + newBucket.multiPartSession = make(map[string]MultiPartSession) + newBucket.partMetadata = make(map[string]map[int]PartMetadata) + a.storedBuckets.Set(k, newBucket) + } + a.Heal() + } + return a, nil +} + +/// V2 API functions + +// GetObject - GET object from cache buffer +func (xl API) GetObject(w io.Writer, bucket string, object string, start, length int64) (int64, *probe.Error) { + xl.lock.Lock() + defer xl.lock.Unlock() + + if !IsValidBucket(bucket) { + return 0, probe.NewError(BucketNameInvalid{Bucket: bucket}) + } + if !IsValidObjectName(object) { + return 0, probe.NewError(ObjectNameInvalid{Object: object}) + } + if start < 0 { + return 0, probe.NewError(InvalidRange{ + Start: start, + Length: length, + }) + } + if !xl.storedBuckets.Exists(bucket) { + return 0, probe.NewError(BucketNotFound{Bucket: bucket}) + } + objectKey := bucket + "/" + object + data, ok := xl.objects.Get(objectKey) + var written int64 + if !ok { + if len(xl.config.NodeDiskMap) > 0 { + reader, size, err := xl.getObject(bucket, object) + if err != nil { + return 0, err.Trace() + } + if start > 0 { + if _, err := io.CopyN(ioutil.Discard, reader, start); err != nil { + return 0, 
probe.NewError(err) + } + } + // new proxy writer to capture data read from disk + pw := NewProxyWriter(w) + { + var err error + if length > 0 { + written, err = io.CopyN(pw, reader, length) + if err != nil { + return 0, probe.NewError(err) + } + } else { + written, err = io.CopyN(pw, reader, size) + if err != nil { + return 0, probe.NewError(err) + } + } + } + /// cache object read from disk + ok := xl.objects.Append(objectKey, pw.writtenBytes) + pw.writtenBytes = nil + go debug.FreeOSMemory() + if !ok { + return 0, probe.NewError(InternalError{}) + } + return written, nil + } + return 0, probe.NewError(ObjectNotFound{Object: object}) + } + var err error + if start == 0 && length == 0 { + written, err = io.CopyN(w, bytes.NewBuffer(data), int64(xl.objects.Len(objectKey))) + if err != nil { + return 0, probe.NewError(err) + } + return written, nil + } + written, err = io.CopyN(w, bytes.NewBuffer(data[start:]), length) + if err != nil { + return 0, probe.NewError(err) + } + return written, nil +} + +// GetBucketMetadata - get bucket metadata, fetching it from disk if it is not cached +func (xl API) GetBucketMetadata(bucket string) (BucketMetadata, *probe.Error) { + xl.lock.Lock() + defer xl.lock.Unlock() + + if !IsValidBucket(bucket) { + return BucketMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) + } + if !xl.storedBuckets.Exists(bucket) { + if len(xl.config.NodeDiskMap) > 0 { + bucketMetadata, err := xl.getBucketMetadata(bucket) + if err != nil { + return BucketMetadata{}, err.Trace() + } + // cache the metadata read from disk and return it + var newBucket = storedBucket{} + newBucket.bucketMetadata = bucketMetadata + newBucket.objectMetadata = make(map[string]ObjectMetadata) + newBucket.multiPartSession = make(map[string]MultiPartSession) + newBucket.partMetadata = make(map[string]map[int]PartMetadata) + xl.storedBuckets.Set(bucket, newBucket) + return bucketMetadata, nil + } + return BucketMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) + } + return xl.storedBuckets.Get(bucket).(storedBucket).bucketMetadata, nil +} + +// SetBucketMetadata - set bucket metadata in cache and on disk +func (xl API) SetBucketMetadata(bucket string, metadata map[string]string) *probe.Error { + xl.lock.Lock() + defer xl.lock.Unlock() + + if !IsValidBucket(bucket) { + return probe.NewError(BucketNameInvalid{Bucket: bucket}) + } + if !xl.storedBuckets.Exists(bucket) { + return probe.NewError(BucketNotFound{Bucket: bucket}) + } + if len(xl.config.NodeDiskMap) > 0 { + if err := xl.setBucketMetadata(bucket, metadata); err != nil { + return err.Trace() + } + } + storedBucket := xl.storedBuckets.Get(bucket).(storedBucket) + storedBucket.bucketMetadata.ACL = BucketACL(metadata["acl"]) + xl.storedBuckets.Set(bucket, storedBucket) + return nil +} + +// isMD5SumEqual - returns an error if the md5sums do not match, nil on success +func isMD5SumEqual(expectedMD5Sum, actualMD5Sum string) *probe.Error { + if strings.TrimSpace(expectedMD5Sum) != "" && strings.TrimSpace(actualMD5Sum) != "" { + expectedMD5SumBytes, err := hex.DecodeString(expectedMD5Sum) + if err != nil { + return probe.NewError(err) + } + actualMD5SumBytes, err := hex.DecodeString(actualMD5Sum) + if err != nil { + return probe.NewError(err) + } + if !bytes.Equal(expectedMD5SumBytes, actualMD5SumBytes) { + return probe.NewError(BadDigest{}) + } + return nil + } + return probe.NewError(InvalidArgument{}) +} + +// CreateObject - create an object +func (xl API) CreateObject(bucket, key, expectedMD5Sum string, size int64, data io.Reader, metadata map[string]string, signature *signV4.Signature) (ObjectMetadata, *probe.Error) { + xl.lock.Lock() + defer xl.lock.Unlock() + + contentType := metadata["contentType"] + objectMetadata, err := xl.createObject(bucket, key, contentType, expectedMD5Sum, size, data, signature) + // free + debug.FreeOSMemory() + + return objectMetadata, 
err.Trace() +} + +// createObject - PUT object to cache buffer +func (xl API) createObject(bucket, key, contentType, expectedMD5Sum string, size int64, data io.Reader, signature *signV4.Signature) (ObjectMetadata, *probe.Error) { + if len(xl.config.NodeDiskMap) == 0 { + if size > int64(xl.config.MaxSize) { + generic := GenericObjectError{Bucket: bucket, Object: key} + return ObjectMetadata{}, probe.NewError(EntityTooLarge{ + GenericObjectError: generic, + Size: strconv.FormatInt(size, 10), + MaxSize: strconv.FormatUint(xl.config.MaxSize, 10), + }) + } + } + if !IsValidBucket(bucket) { + return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) + } + if !IsValidObjectName(key) { + return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Object: key}) + } + if !xl.storedBuckets.Exists(bucket) { + return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) + } + storedBucket := xl.storedBuckets.Get(bucket).(storedBucket) + // get object key + objectKey := bucket + "/" + key + if _, ok := storedBucket.objectMetadata[objectKey]; ok { + return ObjectMetadata{}, probe.NewError(ObjectExists{Object: key}) + } + + if contentType == "" { + contentType = "application/octet-stream" + } + contentType = strings.TrimSpace(contentType) + if strings.TrimSpace(expectedMD5Sum) != "" { + expectedMD5SumBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(expectedMD5Sum)) + if err != nil { + // malformed content-md5, reject the request + return ObjectMetadata{}, probe.NewError(InvalidDigest{Md5: expectedMD5Sum}) + } + expectedMD5Sum = hex.EncodeToString(expectedMD5SumBytes) + } + + if len(xl.config.NodeDiskMap) > 0 { + objMetadata, err := xl.putObject( + bucket, + key, + expectedMD5Sum, + data, + size, + map[string]string{ + "contentType": contentType, + "contentLength": strconv.FormatInt(size, 10), + }, + signature, + ) + if err != nil { + return ObjectMetadata{}, err.Trace() + } + storedBucket.objectMetadata[objectKey] = objMetadata + xl.storedBuckets.Set(bucket, storedBucket) + return objMetadata, nil + } + + // calculate md5 + hash := md5.New() + sha256hash := sha256.New() + + var err error + var totalLength int64 + for err == nil { + var length int + byteBuffer := make([]byte, 1024*1024) + length, err = data.Read(byteBuffer) + if length != 0 { + hash.Write(byteBuffer[0:length]) + sha256hash.Write(byteBuffer[0:length]) + ok := xl.objects.Append(objectKey, byteBuffer[0:length]) + if !ok { + return ObjectMetadata{}, probe.NewError(InternalError{}) + } + totalLength += int64(length) + go debug.FreeOSMemory() + } + } + if size != 0 { + if totalLength != size { + // delete the object; it may be partially saved, due to the nature of append() + xl.objects.Delete(objectKey) + return ObjectMetadata{}, probe.NewError(IncompleteBody{Bucket: bucket, Object: key}) + } + } + if err != io.EOF { + return ObjectMetadata{}, probe.NewError(err) + } + md5SumBytes := hash.Sum(nil) + md5Sum := hex.EncodeToString(md5SumBytes) + // Verify if the written object is equal to what is expected, only if it is requested as such + if strings.TrimSpace(expectedMD5Sum) != "" { + if err := isMD5SumEqual(strings.TrimSpace(expectedMD5Sum), md5Sum); err != nil { + // delete the object; it may be partially saved, due to the nature of append() + xl.objects.Delete(objectKey) + return ObjectMetadata{}, probe.NewError(BadDigest{}) + } + } + if signature != nil { + ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256hash.Sum(nil))) + if err != nil { + // delete the object; it may be partially saved, due 
to the nature of append() + xl.objects.Delete(objectKey) + return ObjectMetadata{}, err.Trace() + } + if !ok { + // delete the object; it may be partially saved, due to the nature of append() + xl.objects.Delete(objectKey) + return ObjectMetadata{}, probe.NewError(signV4.SigDoesNotMatch{}) + } + } + + m := make(map[string]string) + m["contentType"] = contentType + newObject := ObjectMetadata{ + Bucket: bucket, + Object: key, + + Metadata: m, + Created: time.Now().UTC(), + MD5Sum: md5Sum, + Size: int64(totalLength), + } + + storedBucket.objectMetadata[objectKey] = newObject + xl.storedBuckets.Set(bucket, storedBucket) + return newObject, nil +} + +// MakeBucket - create bucket in cache +func (xl API) MakeBucket(bucketName, acl string, location io.Reader, signature *signV4.Signature) *probe.Error { + xl.lock.Lock() + defer xl.lock.Unlock() + + // no need to parse the location constraint, this sum is used only for signature verification; the default is the sha256 sum of an empty payload + locationSum := "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + if location != nil { + locationConstraintBytes, err := ioutil.ReadAll(location) + if err != nil { + return probe.NewError(InternalError{}) + } + locationConstraintHashBytes := sha256.Sum256(locationConstraintBytes) + locationSum = hex.EncodeToString(locationConstraintHashBytes[:]) + } + + if signature != nil { + ok, err := signature.DoesSignatureMatch(locationSum) + if err != nil { + return err.Trace() + } + if !ok { + return probe.NewError(signV4.SigDoesNotMatch{}) + } + } + + if xl.storedBuckets.Stats().Items == totalBuckets { + return probe.NewError(TooManyBuckets{Bucket: bucketName}) + } + if !IsValidBucket(bucketName) { + return probe.NewError(BucketNameInvalid{Bucket: bucketName}) + } + if !IsValidBucketACL(acl) { + return probe.NewError(InvalidACL{ACL: acl}) + } + if xl.storedBuckets.Exists(bucketName) { + return probe.NewError(BucketExists{Bucket: bucketName}) + } + + if strings.TrimSpace(acl) == "" { + // default is private + acl = "private" + } + if len(xl.config.NodeDiskMap) > 0 { + if err := xl.makeBucket(bucketName, BucketACL(acl)); err != nil { + return err.Trace() + } + } + var newBucket = storedBucket{} + newBucket.objectMetadata = make(map[string]ObjectMetadata) + newBucket.multiPartSession = make(map[string]MultiPartSession) + newBucket.partMetadata = make(map[string]map[int]PartMetadata) + newBucket.bucketMetadata = BucketMetadata{} + newBucket.bucketMetadata.Name = bucketName + newBucket.bucketMetadata.Created = time.Now().UTC() + newBucket.bucketMetadata.ACL = BucketACL(acl) + xl.storedBuckets.Set(bucketName, newBucket) + return nil +} + +// ListObjects - list objects from cache +func (xl API) ListObjects(bucket string, resources BucketResourcesMetadata) ([]ObjectMetadata, BucketResourcesMetadata, *probe.Error) { + xl.lock.Lock() + defer xl.lock.Unlock() + + if !IsValidBucket(bucket) { + return nil, BucketResourcesMetadata{IsTruncated: false}, probe.NewError(BucketNameInvalid{Bucket: bucket}) + } + if !IsValidPrefix(resources.Prefix) { + return nil, BucketResourcesMetadata{IsTruncated: false}, probe.NewError(ObjectNameInvalid{Object: resources.Prefix}) + } + if !xl.storedBuckets.Exists(bucket) { + return nil, BucketResourcesMetadata{IsTruncated: false}, probe.NewError(BucketNotFound{Bucket: bucket}) + } + var results []ObjectMetadata + var keys []string + if len(xl.config.NodeDiskMap) > 0 { + listObjects, err := xl.listObjects( + bucket, + resources.Prefix, + resources.Marker, + resources.Delimiter, + resources.Maxkeys, + ) + if err != nil { + return nil, 
BucketResourcesMetadata{IsTruncated: false}, err.Trace() + } + resources.CommonPrefixes = listObjects.CommonPrefixes + resources.IsTruncated = listObjects.IsTruncated + for key := range listObjects.Objects { + keys = append(keys, key) + } + sort.Strings(keys) + for _, key := range keys { + results = append(results, listObjects.Objects[key]) + } + if resources.IsTruncated && resources.Delimiter != "" { + resources.NextMarker = results[len(results)-1].Object + } + return results, resources, nil + } + storedBucket := xl.storedBuckets.Get(bucket).(storedBucket) + for key := range storedBucket.objectMetadata { + if strings.HasPrefix(key, bucket+"/") { + key = key[len(bucket)+1:] + if strings.HasPrefix(key, resources.Prefix) { + if key > resources.Marker { + keys = append(keys, key) + } + } + } + } + if strings.TrimSpace(resources.Prefix) != "" { + keys = TrimPrefix(keys, resources.Prefix) + } + var prefixes []string + var filteredKeys []string + filteredKeys = keys + if strings.TrimSpace(resources.Delimiter) != "" { + filteredKeys = HasNoDelimiter(keys, resources.Delimiter) + prefixes = HasDelimiter(keys, resources.Delimiter) + prefixes = SplitDelimiter(prefixes, resources.Delimiter) + prefixes = SortUnique(prefixes) + } + for _, commonPrefix := range prefixes { + resources.CommonPrefixes = append(resources.CommonPrefixes, resources.Prefix+commonPrefix) + } + filteredKeys = RemoveDuplicates(filteredKeys) + sort.Strings(filteredKeys) + + for _, key := range filteredKeys { + if len(results) == resources.Maxkeys { + resources.IsTruncated = true + if resources.IsTruncated && resources.Delimiter != "" { + resources.NextMarker = results[len(results)-1].Object + } + return results, resources, nil + } + object := storedBucket.objectMetadata[bucket+"/"+resources.Prefix+key] + results = append(results, object) + } + resources.CommonPrefixes = RemoveDuplicates(resources.CommonPrefixes) + sort.Strings(resources.CommonPrefixes) + return results, resources, nil +} + +// byBucketName is a type for sorting bucket metadata by bucket name +type byBucketName []BucketMetadata + +func (b byBucketName) Len() int { return len(b) } +func (b byBucketName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byBucketName) Less(i, j int) bool { return b[i].Name < b[j].Name } + +// ListBuckets - List buckets from cache +func (xl API) ListBuckets() ([]BucketMetadata, *probe.Error) { + xl.lock.Lock() + defer xl.lock.Unlock() + + var results []BucketMetadata + if len(xl.config.NodeDiskMap) > 0 { + buckets, err := xl.listBuckets() + if err != nil { + return nil, err.Trace() + } + for _, bucketMetadata := range buckets { + results = append(results, bucketMetadata) + } + sort.Sort(byBucketName(results)) + return results, nil + } + for _, bucket := range xl.storedBuckets.GetAll() { + results = append(results, bucket.(storedBucket).bucketMetadata) + } + sort.Sort(byBucketName(results)) + return results, nil +} + +// GetObjectMetadata - get object metadata from cache +func (xl API) GetObjectMetadata(bucket, key string) (ObjectMetadata, *probe.Error) { + xl.lock.Lock() + defer xl.lock.Unlock() + + // check if bucket exists + if !IsValidBucket(bucket) { + return ObjectMetadata{}, probe.NewError(BucketNameInvalid{Bucket: bucket}) + } + if !IsValidObjectName(key) { + return ObjectMetadata{}, probe.NewError(ObjectNameInvalid{Object: key}) + } + if !xl.storedBuckets.Exists(bucket) { + return ObjectMetadata{}, probe.NewError(BucketNotFound{Bucket: bucket}) + } + storedBucket := xl.storedBuckets.Get(bucket).(storedBucket) + objectKey := 
bucket + "/" + key + if objMetadata, ok := storedBucket.objectMetadata[objectKey]; ok == true { + return objMetadata, nil + } + if len(xl.config.NodeDiskMap) > 0 { + objMetadata, err := xl.getObjectMetadata(bucket, key) + if err != nil { + return ObjectMetadata{}, err.Trace() + } + // update + storedBucket.objectMetadata[objectKey] = objMetadata + xl.storedBuckets.Set(bucket, storedBucket) + return objMetadata, nil + } + return ObjectMetadata{}, probe.NewError(ObjectNotFound{Object: key}) +} + +// evictedObject callback function called when an item is evicted from memory +func (xl API) evictedObject(a ...interface{}) { + cacheStats := xl.objects.Stats() + log.Printf("CurrentSize: %d, CurrentItems: %d, TotalEvicted: %d", + cacheStats.Bytes, cacheStats.Items, cacheStats.Evicted) + key := a[0].(string) + // loop through all buckets + for _, bucket := range xl.storedBuckets.GetAll() { + delete(bucket.(storedBucket).objectMetadata, key) + } + debug.FreeOSMemory() +} diff --git a/pkg/xl/xl-v2_test.go b/pkg/xl/xl-v2_test.go new file mode 100644 index 000000000..b82f54fcd --- /dev/null +++ b/pkg/xl/xl-v2_test.go @@ -0,0 +1,265 @@ +/* + * Minio Cloud Storage, (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impliedc. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package xl + +import ( + "bytes" + "crypto/md5" + "encoding/base64" + "encoding/hex" + "io/ioutil" + "os" + "path/filepath" + "testing" + + . 
"gopkg.in/check.v1" +) + +func TestCache(t *testing.T) { TestingT(t) } + +type MyCacheSuite struct { + root string +} + +var _ = Suite(&MyCacheSuite{}) + +var dc Interface + +func (s *MyCacheSuite) SetUpSuite(c *C) { + root, err := ioutil.TempDir(os.TempDir(), "xl-") + c.Assert(err, IsNil) + s.root = root + + SetXLConfigPath(filepath.Join(root, "xl.json")) + dc, _ = New() + + // testing empty cache + var buckets []BucketMetadata + buckets, perr := dc.ListBuckets() + c.Assert(perr, IsNil) + c.Assert(len(buckets), Equals, 0) +} + +func (s *MyCacheSuite) TearDownSuite(c *C) { + os.RemoveAll(s.root) +} + +// test make bucket without name +func (s *MyCacheSuite) TestBucketWithoutNameFails(c *C) { + // fail to create new bucket without a name + err := dc.MakeBucket("", "private", nil, nil) + c.Assert(err, Not(IsNil)) + + err = dc.MakeBucket(" ", "private", nil, nil) + c.Assert(err, Not(IsNil)) +} + +// test empty bucket +func (s *MyCacheSuite) TestEmptyBucket(c *C) { + c.Assert(dc.MakeBucket("foo1", "private", nil, nil), IsNil) + // check if bucket is empty + var resources BucketResourcesMetadata + resources.Maxkeys = 1 + objectsMetadata, resources, err := dc.ListObjects("foo1", resources) + c.Assert(err, IsNil) + c.Assert(len(objectsMetadata), Equals, 0) + c.Assert(resources.CommonPrefixes, DeepEquals, []string{}) + c.Assert(resources.IsTruncated, Equals, false) +} + +// test bucket list +func (s *MyCacheSuite) TestMakeBucketAndList(c *C) { + // create bucket + err := dc.MakeBucket("foo2", "private", nil, nil) + c.Assert(err, IsNil) + + // check bucket exists + buckets, err := dc.ListBuckets() + c.Assert(err, IsNil) + c.Assert(len(buckets), Equals, 5) + c.Assert(buckets[0].ACL, Equals, BucketACL("private")) +} + +// test re-create bucket +func (s *MyCacheSuite) TestMakeBucketWithSameNameFails(c *C) { + err := dc.MakeBucket("foo3", "private", nil, nil) + c.Assert(err, IsNil) + + err = dc.MakeBucket("foo3", "private", nil, nil) + c.Assert(err, Not(IsNil)) +} + +// test make multiple buckets +func (s *MyCacheSuite) TestCreateMultipleBucketsAndList(c *C) { + // add a second bucket + err := dc.MakeBucket("foo4", "private", nil, nil) + c.Assert(err, IsNil) + + err = dc.MakeBucket("bar1", "private", nil, nil) + c.Assert(err, IsNil) + + buckets, err := dc.ListBuckets() + c.Assert(err, IsNil) + + c.Assert(len(buckets), Equals, 2) + c.Assert(buckets[0].Name, Equals, "bar1") + c.Assert(buckets[1].Name, Equals, "foo4") + + err = dc.MakeBucket("foobar1", "private", nil, nil) + c.Assert(err, IsNil) + + buckets, err = dc.ListBuckets() + c.Assert(err, IsNil) + + c.Assert(len(buckets), Equals, 3) + c.Assert(buckets[2].Name, Equals, "foobar1") +} + +// test object create without bucket +func (s *MyCacheSuite) TestNewObjectFailsWithoutBucket(c *C) { + _, err := dc.CreateObject("unknown", "obj", "", 0, nil, nil, nil) + c.Assert(err, Not(IsNil)) +} + +// test create object metadata +func (s *MyCacheSuite) TestNewObjectMetadata(c *C) { + data := "Hello World" + hasher := md5.New() + hasher.Write([]byte(data)) + expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) + reader := ioutil.NopCloser(bytes.NewReader([]byte(data))) + + err := dc.MakeBucket("foo6", "private", nil, nil) + c.Assert(err, IsNil) + + objectMetadata, err := dc.CreateObject("foo6", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/json"}, nil) + c.Assert(err, IsNil) + c.Assert(objectMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil))) + 
c.Assert(objectMetadata.Metadata["contentType"], Equals, "application/json") +} + +// test create object fails without name +func (s *MyCacheSuite) TestNewObjectFailsWithEmptyName(c *C) { + _, err := dc.CreateObject("foo", "", "", 0, nil, nil, nil) + c.Assert(err, Not(IsNil)) +} + +// test create object +func (s *MyCacheSuite) TestNewObjectCanBeWritten(c *C) { + err := dc.MakeBucket("foo", "private", nil, nil) + c.Assert(err, IsNil) + + data := "Hello World" + + hasher := md5.New() + hasher.Write([]byte(data)) + expectedMd5Sum := base64.StdEncoding.EncodeToString(hasher.Sum(nil)) + reader := ioutil.NopCloser(bytes.NewReader([]byte(data))) + + actualMetadata, err := dc.CreateObject("foo", "obj", expectedMd5Sum, int64(len(data)), reader, map[string]string{"contentType": "application/octet-stream"}, nil) + c.Assert(err, IsNil) + c.Assert(actualMetadata.MD5Sum, Equals, hex.EncodeToString(hasher.Sum(nil))) + + var buffer bytes.Buffer + size, err := dc.GetObject(&buffer, "foo", "obj", 0, 0) + c.Assert(err, IsNil) + c.Assert(size, Equals, int64(len(data))) + c.Assert(buffer.Bytes(), DeepEquals, []byte(data)) + + actualMetadata, err = dc.GetObjectMetadata("foo", "obj") + c.Assert(err, IsNil) + c.Assert(hex.EncodeToString(hasher.Sum(nil)), Equals, actualMetadata.MD5Sum) + c.Assert(int64(len(data)), Equals, actualMetadata.Size) +} + +// test list objects +func (s *MyCacheSuite) TestMultipleNewObjects(c *C) { + c.Assert(dc.MakeBucket("foo5", "private", nil, nil), IsNil) + + one := ioutil.NopCloser(bytes.NewReader([]byte("one"))) + + _, err := dc.CreateObject("foo5", "obj1", "", int64(len("one")), one, nil, nil) + c.Assert(err, IsNil) + + two := ioutil.NopCloser(bytes.NewReader([]byte("two"))) + _, err = dc.CreateObject("foo5", "obj2", "", int64(len("two")), two, nil, nil) + c.Assert(err, IsNil) + + var buffer1 bytes.Buffer + size, err := dc.GetObject(&buffer1, "foo5", "obj1", 0, 0) + c.Assert(err, IsNil) + c.Assert(size, Equals, int64(len([]byte("one")))) + c.Assert(buffer1.Bytes(), DeepEquals, []byte("one")) + + var buffer2 bytes.Buffer + size, err = dc.GetObject(&buffer2, "foo5", "obj2", 0, 0) + c.Assert(err, IsNil) + c.Assert(size, Equals, int64(len([]byte("two")))) + + c.Assert(buffer2.Bytes(), DeepEquals, []byte("two")) + + /// test list of objects + + // test list objects with prefix and delimiter + var resources BucketResourcesMetadata + resources.Prefix = "o" + resources.Delimiter = "1" + resources.Maxkeys = 10 + objectsMetadata, resources, err := dc.ListObjects("foo5", resources) + c.Assert(err, IsNil) + c.Assert(resources.IsTruncated, Equals, false) + c.Assert(resources.CommonPrefixes[0], Equals, "obj1") + + // test list objects with only delimiter + resources.Prefix = "" + resources.Delimiter = "1" + resources.Maxkeys = 10 + objectsMetadata, resources, err = dc.ListObjects("foo5", resources) + c.Assert(err, IsNil) + c.Assert(objectsMetadata[0].Object, Equals, "obj2") + c.Assert(resources.IsTruncated, Equals, false) + c.Assert(resources.CommonPrefixes[0], Equals, "obj1") + + // test list objects with only prefix + resources.Prefix = "o" + resources.Delimiter = "" + resources.Maxkeys = 10 + objectsMetadata, resources, err = dc.ListObjects("foo5", resources) + c.Assert(err, IsNil) + c.Assert(resources.IsTruncated, Equals, false) + c.Assert(objectsMetadata[0].Object, Equals, "obj1") + c.Assert(objectsMetadata[1].Object, Equals, "obj2") + + three := ioutil.NopCloser(bytes.NewReader([]byte("three"))) + _, err = dc.CreateObject("foo5", "obj3", "", int64(len("three")), three, nil, nil) + 
c.Assert(err, IsNil) + + var buffer bytes.Buffer + size, err = dc.GetObject(&buffer, "foo5", "obj3", 0, 0) + c.Assert(err, IsNil) + c.Assert(size, Equals, int64(len([]byte("three")))) + c.Assert(buffer.Bytes(), DeepEquals, []byte("three")) + + // test list objects with maxkeys + resources.Prefix = "o" + resources.Delimiter = "" + resources.Maxkeys = 2 + objectsMetadata, resources, err = dc.ListObjects("foo5", resources) + c.Assert(err, IsNil) + c.Assert(resources.IsTruncated, Equals, true) + c.Assert(len(objectsMetadata), Equals, 2) +} diff --git a/routers.go b/routers.go index 3ed0811f5..8ffb86c6f 100644 --- a/routers.go +++ b/routers.go @@ -24,8 +24,8 @@ import ( jsonrpc "github.com/gorilla/rpc/v2" "github.com/gorilla/rpc/v2/json" "github.com/minio/minio-go" - "github.com/minio/minio-xl/pkg/probe" "github.com/minio/minio/pkg/fs" + "github.com/minio/minio/pkg/probe" ) // CloudStorageAPI container for S3 compatible API. diff --git a/server-config.go b/server-config.go index b6dab99b7..f39fcbf94 100644 --- a/server-config.go +++ b/server-config.go @@ -24,8 +24,8 @@ import ( "path/filepath" "github.com/fatih/color" - "github.com/minio/minio-xl/pkg/probe" - "github.com/minio/minio-xl/pkg/quick" + "github.com/minio/minio/pkg/probe" + "github.com/minio/minio/pkg/quick" "github.com/minio/minio/pkg/user" ) diff --git a/server-main.go b/server-main.go index e8508d0ce..f02374742 100644 --- a/server-main.go +++ b/server-main.go @@ -29,8 +29,8 @@ import ( "github.com/fatih/color" "github.com/minio/cli" - "github.com/minio/minio-xl/pkg/minhttp" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/minhttp" + "github.com/minio/minio/pkg/probe" ) var serverCmd = cli.Command{ diff --git a/signature-handler.go b/signature-handler.go index f5dd1edc2..1b20c8a19 100644 --- a/signature-handler.go +++ b/signature-handler.go @@ -21,9 +21,9 @@ import ( "net/http" "strings" - "github.com/minio/minio-xl/pkg/crypto/sha256" - "github.com/minio/minio-xl/pkg/probe" - "github.com/minio/minio/pkg/fs" + "github.com/minio/minio/pkg/crypto/sha256" + "github.com/minio/minio/pkg/probe" + v4 "github.com/minio/minio/pkg/signature" ) type signatureHandler struct { @@ -72,7 +72,7 @@ func (s signatureHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.handler.ServeHTTP(w, r) return } - var signature *fs.Signature + var signature *v4.Signature if isRequestSignatureV4(r) { // For PUT and POST requests with payload, send the call upwards for verification. // Or PUT and POST requests without payload, verify here. 
@@ -96,7 +96,8 @@ func (s signatureHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } } - ok, err := signature.DoesSignatureMatch(hex.EncodeToString(sha256.Sum256([]byte("")))) + dummySha256Bytes := sha256.Sum256([]byte("")) + ok, err := signature.DoesSignatureMatch(hex.EncodeToString(dummySha256Bytes[:])) if err != nil { errorIf(err.Trace(), "Unable to verify signature.", nil) writeErrorResponse(w, r, InternalError, r.URL.Path) diff --git a/signature_utils_test.go b/signature_utils_test.go index 408933098..8d747b5e3 100644 --- a/signature_utils_test.go +++ b/signature_utils_test.go @@ -24,7 +24,7 @@ import ( "strings" "unicode/utf8" - "github.com/minio/minio-xl/pkg/crypto/sha256" + "github.com/minio/minio/pkg/crypto/sha256" ) // sum256Reader calculate sha256 sum for an input read seeker diff --git a/update-main.go b/update-main.go index 4600fb3b2..10f6e8b6b 100644 --- a/update-main.go +++ b/update-main.go @@ -27,7 +27,7 @@ import ( "github.com/fatih/color" "github.com/minio/cli" - "github.com/minio/minio-xl/pkg/probe" + "github.com/minio/minio/pkg/probe" ) // command specific flags. diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_darwin.go b/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_darwin.go deleted file mode 100644 index 6dfb477fb..000000000 --- a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_darwin.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build 386 amd64 arm -// +build darwin - -/* - * Minio Cloud Storage, (C) 2014 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -import ( - "hash" - "io" - - "crypto/sha256" -) - -// Sum256 - single caller sha256 helper -func Sum256(data []byte) []byte { - d := sha256.New() - d.Write(data) - return d.Sum(nil) -} - -// Sum - io.Reader based streaming sha256 helper -func Sum(reader io.Reader) ([]byte, error) { - d := sha256.New() - var err error - for err == nil { - length := 0 - byteBuffer := make([]byte, 1024*1024) - length, err = reader.Read(byteBuffer) - byteBuffer = byteBuffer[0:length] - d.Write(byteBuffer) - } - if err != io.EOF { - return nil, err - } - return d.Sum(nil), nil -} - -// New returns a new hash.Hash computing SHA256. -func New() hash.Hash { - return sha256.New() -} diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_windows.go b/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_windows.go deleted file mode 100644 index 4585e2b75..000000000 --- a/vendor/github.com/minio/minio-xl/pkg/crypto/sha256/sha256_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build 386 amd64 arm -// +build windows - -/* - * Minio Cloud Storage, (C) 2014 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha256 - -import ( - "hash" - "io" - - "crypto/sha256" -) - -// Sum256 - single caller sha256 helper -func Sum256(data []byte) []byte { - d := sha256.New() - d.Write(data) - return d.Sum(nil) -} - -// Sum - io.Reader based streaming sha256 helper -func Sum(reader io.Reader) ([]byte, error) { - d := sha256.New() - var err error - for err == nil { - length := 0 - byteBuffer := make([]byte, 1024*1024) - length, err = reader.Read(byteBuffer) - byteBuffer = byteBuffer[0:length] - d.Write(byteBuffer) - } - if err != io.EOF { - return nil, err - } - return d.Sum(nil), nil -} - -// New returns a new hash.Hash computing SHA256. -func New() hash.Hash { - return sha256.New() -} diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_darwin.go b/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_darwin.go deleted file mode 100644 index da971f4d4..000000000 --- a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_darwin.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build 386 amd64 arm -// +build darwin - -/* - * Minio Cloud Storage, (C) 2014 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha512 - -import ( - "hash" - "io" - - "crypto/sha512" -) - -// The size of a SHA512 checksum in bytes. -const ( - Size = sha512.Size -) - -// Sum512 - single caller sha512 helper -func Sum512(data []byte) []byte { - d := sha512.New() - d.Write(data) - return d.Sum(nil) -} - -// Sum - io.Reader based streaming sha512 helper -func Sum(reader io.Reader) ([]byte, error) { - d := sha512.New() - var err error - for err == nil { - length := 0 - byteBuffer := make([]byte, 1024*1024) - length, err = reader.Read(byteBuffer) - byteBuffer = byteBuffer[0:length] - d.Write(byteBuffer) - } - if err != io.EOF { - return nil, err - } - return d.Sum(nil), nil -} - -// New returns a new hash.Hash computing SHA512. -func New() hash.Hash { - return sha512.New() -} diff --git a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_windows.go b/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_windows.go deleted file mode 100644 index bebd60967..000000000 --- a/vendor/github.com/minio/minio-xl/pkg/crypto/sha512/sha512_windows.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build 386 amd64 arm -// +build windows - -/* - * Minio Cloud Storage, (C) 2014 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sha512 - -import ( - "hash" - "io" - - "crypto/sha512" -) - -// The size of a SHA512 checksum in bytes. -const ( - Size = sha512.Size -) - -// Sum512 - single caller sha512 helper -func Sum512(data []byte) []byte { - d := sha512.New() - d.Write(data) - return d.Sum(nil) -} - -// Sum - io.Reader based streaming sha512 helper -func Sum(reader io.Reader) ([]byte, error) { - d := sha512.New() - var err error - for err == nil { - length := 0 - byteBuffer := make([]byte, 1024*1024) - length, err = reader.Read(byteBuffer) - byteBuffer = byteBuffer[0:length] - d.Write(byteBuffer) - } - if err != io.EOF { - return nil, err - } - return d.Sum(nil), nil -} - -// New returns a new hash.Hash computing SHA512. -func New() hash.Hash { - return sha512.New() -} diff --git a/vendor/vendor.json b/vendor/vendor.json index 2339b35e0..3fc96b389 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -62,41 +62,6 @@ "revision": "280f16a52008d3ebba1bd64398b9b082e6738386", "revisionTime": "2016-02-07T03:45:25-08:00" }, - { - "path": "github.com/minio/minio-xl/pkg/atomic", - "revision": "69c47f638917ab1cb9e24649c84ac38e6f1891b8", - "revisionTime": "2016-01-14T18:12:05-08:00" - }, - { - "path": "github.com/minio/minio-xl/pkg/cpu", - "revision": "a5fc6d2430ba2ebcab31b938ab02a42bac85dc2e", - "revisionTime": "2015-10-20T11:16:42-07:00" - }, - { - "path": "github.com/minio/minio-xl/pkg/crypto/sha256", - "revision": "69c47f638917ab1cb9e24649c84ac38e6f1891b8", - "revisionTime": "2016-01-14T18:12:05-08:00" - }, - { - "path": "github.com/minio/minio-xl/pkg/crypto/sha512", - "revision": "69c47f638917ab1cb9e24649c84ac38e6f1891b8", - "revisionTime": "2016-01-14T18:12:05-08:00" - }, - { - "path": "github.com/minio/minio-xl/pkg/minhttp", - "revision": "a5fc6d2430ba2ebcab31b938ab02a42bac85dc2e", - "revisionTime": "2015-10-20T11:16:42-07:00" - }, - { - "path": "github.com/minio/minio-xl/pkg/probe", - "revision": "a7b8623fd546965505f18172717393f5de4139a2", - "revisionTime": "2015-10-25T03:03:43-07:00" - }, - { - "path": "github.com/minio/minio-xl/pkg/quick", - "revision": "69c47f638917ab1cb9e24649c84ac38e6f1891b8", - "revisionTime": "2016-01-14T18:12:05-08:00" - }, { "path": "github.com/rs/cors", "revision": "eb527c8097e0f19a3ff7b253a3fe70545070f420", diff --git a/web-handlers.go b/web-handlers.go index 90d0d63b8..973447745 100644 --- a/web-handlers.go +++ b/web-handlers.go @@ -31,8 +31,8 @@ import ( jwtgo "github.com/dgrijalva/jwt-go" "github.com/dustin/go-humanize" "github.com/minio/minio-go" - "github.com/minio/minio-xl/pkg/probe" "github.com/minio/minio/pkg/disk" + "github.com/minio/minio/pkg/probe" ) // isAuthenticated validates if any incoming request to be a valid JWT