update gofumpt -w - new changes

Harshavardhana 2022-04-13 12:00:11 -07:00
parent 7ce1f6e736
commit eda34423d7
21 changed files with 57 additions and 29 deletions
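
The pattern repeated across the hunks below is gofumpt's rule for multiline signatures: when a function's parameters (or named results) spill onto continuation lines, the final element gets a trailing comma and the closing parenthesis moves onto its own line before the opening brace. A minimal before/after sketch of the two shapes — the function and the ObjectOptions stub here are illustrative, not taken from the commit:

package main

import "context"

// ObjectOptions is a stand-in type for this sketch.
type ObjectOptions struct{}

// Pre-gofumpt shape: the closing ")" hides at the end of the last
// parameter line, so the signature's end blends into the indented body.
func putObjectOld(ctx context.Context, bucket, object string,
	opts ObjectOptions) error {
	return nil
}

// gofumpt shape: trailing comma on the final parameter and ")" on its
// own line at column zero, cleanly separating signature from body.
func putObjectNew(ctx context.Context, bucket, object string,
	opts ObjectOptions,
) error {
	return nil
}

func main() {}

Running gofumpt -w over a file containing the first shape rewrites it into the second; plain gofmt accepts both, which is why these call sites survived until this formatting pass.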

@@ -97,7 +97,8 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
 }
 
 func applyDynamic(ctx context.Context, objectAPI ObjectLayer, cfg config.Config, subSys string,
-	r *http.Request, w http.ResponseWriter) {
+	r *http.Request, w http.ResponseWriter,
+) {
 	// Apply dynamic values.
 	if err := applyDynamicConfigForSubSys(GlobalContext, objectAPI, cfg, subSys); err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)

@@ -341,7 +341,8 @@ func mustNewSignedEmptyMD5Request(method string, urlStr string, contentLength in
 }
 
 func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int64,
-	body io.ReadSeeker, t *testing.T) *http.Request {
+	body io.ReadSeeker, t *testing.T,
+) *http.Request {
 	req := mustNewRequest(method, urlStr, contentLength, body, t)
 	req.Header.Set("Content-Md5", "YWFhYWFhYWFhYWFhYWFhCg==")
 	cred := globalActiveCred

@@ -36,7 +36,8 @@ func TestRemoveBucketHandler(t *testing.T) {
 }
 
 func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewReader([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
 	// if object upload fails stop the test.
 	if err != nil {

@@ -34,7 +34,8 @@ func TestBucketLifecycleWrongCredentials(t *testing.T) {
 
 // Test for authentication
 func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	// test cases with sample input and expected output.
 	testCases := []struct {
 		method string

@@ -113,7 +113,8 @@ func TestCreateBucket(t *testing.T) {
 
 // testCreateBucket - Test for calling Create Bucket and ensure we get one and only one success.
 func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	bucketName1 := fmt.Sprintf("%s-1", bucketName)
 
 	const n = 100
@@ -378,7 +379,8 @@ func TestGetBucketPolicyHandler(t *testing.T) {
 
 // testGetBucketPolicyHandler - Test for end point which fetches the access policy json of the given bucket.
 func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	// template for constructing HTTP request body for PUT bucket policy.
 	bucketPolicyTemplate := `{"Version":"2012-10-17","Statement":[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s"]},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s/this*"]}]}`
 

@@ -60,7 +60,8 @@ func (fi FileInfo) DataShardFixed() bool {
 // also heals the missing entries for bucket metadata files
 // `policy.json, notification.xml, listeners.json`.
 func (er erasureObjects) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (
-	result madmin.HealResultItem, err error) {
+	result madmin.HealResultItem, err error,
+) {
 	if !opts.DryRun {
 		defer NSUpdated(bucket, slashSeparator)
 	}

@@ -1099,7 +1099,8 @@ func (s *erasureSets) NewMultipartUpload(ctx context.Context, bucket, object str
 
 // Copies a part of an object from source hashedSet to destination hashedSet.
 func (s *erasureSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
-	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (partInfo PartInfo, err error) {
+	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions,
+) (partInfo PartInfo, err error) {
 	destSet := s.getHashedSet(destObject)
 	auditObjectErasureSet(ctx, destObject, destSet)
 	return destSet.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, NewPutObjReader(srcInfo.Reader), dstOpts)

@@ -1362,13 +1362,15 @@ func (fs *FSObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealRe
 
 // HealObject - no-op for fs. Valid only for Erasure.
 func (fs *FSObjects) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (
-	res madmin.HealResultItem, err error) {
+	res madmin.HealResultItem, err error,
+) {
 	return res, NotImplemented{}
 }
 
 // HealBucket - no-op for fs, Valid only for Erasure.
 func (fs *FSObjects) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem,
-	error) {
+	error,
+) {
 	return madmin.HealResultItem{}, NotImplemented{}
 }
 

@@ -211,7 +211,8 @@ func (a GatewayUnsupported) HealObjects(ctx context.Context, bucket, prefix stri
 
 // CopyObject copies a blob from source container to destination container.
 func (a GatewayUnsupported) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
-	srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
+	srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions,
+) (objInfo ObjectInfo, err error) {
 	return objInfo, NotImplemented{}
 }
 

@@ -1072,7 +1072,8 @@ func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object st
 }
 
 func (a *azureObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, uploadID string, partID int,
-	startOffset int64, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (info minio.PartInfo, err error) {
+	startOffset int64, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions,
+) (info minio.PartInfo, err error) {
 	return a.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.PutObjReader, dstOpts)
 }
 

@@ -929,7 +929,8 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r
 
 // CopyObject - Copies a blob from source container to destination container.
 func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
-	srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) {
+	srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions,
+) (minio.ObjectInfo, error) {
 	if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
 		return minio.ObjectInfo{}, minio.PreConditionFailed{}
 	}

@@ -536,7 +536,8 @@ func (n *hdfsObjects) deleteObject(basePath, deletePath string) error {
 
 // ListObjectsV2 lists all blobs in HDFS bucket filtered by prefix
 func (n *hdfsObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int,
-	fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) {
+	fetchOwner bool, startAfter string,
+) (loi minio.ListObjectsV2Info, err error) {
 	// fetchOwner is not supported and unused.
 	marker := continuationToken
 	if marker == "" {
@@ -793,7 +794,8 @@ func (n *hdfsObjects) ListObjectParts(ctx context.Context, bucket, object, uploa
 }
 
 func (n *hdfsObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
-	startOffset int64, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.PartInfo, error) {
+	startOffset int64, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions,
+) (minio.PartInfo, error) {
 	return n.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.PutObjReader, dstOpts)
 }
 

@@ -549,7 +549,8 @@ func (l *s3EncObjects) PutObjectPart(ctx context.Context, bucket string, object
 
 // CopyObjectPart creates a part in a multipart upload by copying
 // existing object or a part of it.
 func (l *s3EncObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
-	partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (p minio.PartInfo, err error) {
+	partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions,
+) (p minio.PartInfo, err error) {
 	return l.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, srcInfo.PutObjReader, dstOpts)
 }

@@ -640,7 +640,8 @@ func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object str
 // CopyObjectPart creates a part in a multipart upload by copying
 // existing object or a part of it.
 func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
-	partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (p minio.PartInfo, err error) {
+	partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions,
+) (p minio.PartInfo, err error) {
 	if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
 		return minio.PartInfo{}, minio.PreConditionFailed{}
 	}

@@ -1608,7 +1608,8 @@ func (sys *NotificationSys) Netperf(ctx context.Context, duration time.Duration)
 // Speedtest run GET/PUT tests at input concurrency for requested object size,
 // optionally you can extend the tests longer with time.Duration.
 func (sys *NotificationSys) Speedtest(ctx context.Context, size int,
-	concurrent int, duration time.Duration, storageClass string) []SpeedtestResult {
+	concurrent int, duration time.Duration, storageClass string,
+) []SpeedtestResult {
 	length := len(sys.allPeerClients)
 	if length == 0 {
 		// For single node erasure setup.

@@ -62,7 +62,8 @@ func TestAPIHeadObjectHandler(t *testing.T) {
 }
 
 func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	objectName := "test-object"
 	// set of byte data for PutObject.
 	// object has to be created before running tests for HeadObject.
@@ -2592,7 +2593,8 @@ func TestAPINewMultipartHandlerParallel(t *testing.T) {
 }
 
 func testAPINewMultipartHandlerParallel(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	// used for storing the uploadID's parsed on concurrent HTTP requests for NewMultipart upload on the same object.
 	testUploads := struct {
 		sync.Mutex
@@ -3347,7 +3349,8 @@ func TestAPIPutObjectPartHandlerStreaming(t *testing.T) {
 }
 
 func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	testObject := "testobject"
 	rec := httptest.NewRecorder()
 	req, err := newTestSignedRequestV4(http.MethodPost, getNewMultipartURL("", bucketName, "testobject"),
@@ -3748,7 +3751,8 @@ func TestAPIListObjectPartsHandlerPreSign(t *testing.T) {
 }
 
 func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	testObject := "testobject"
 	rec := httptest.NewRecorder()
 	req, err := newTestSignedRequestV4(http.MethodPost, getNewMultipartURL("", bucketName, testObject),
@@ -3835,7 +3839,8 @@ func TestAPIListObjectPartsHandler(t *testing.T) {
 }
 
 func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	testObject := "testobject"
 	var opts ObjectOptions
 	// PutObjectPart API HTTP Handler has to be tested in isolation,

@@ -1039,7 +1039,8 @@ func (client *peerRESTClient) GetPeerMetrics(ctx context.Context) (<-chan Metric
 }
 
 func (client *peerRESTClient) Speedtest(ctx context.Context, size,
-	concurrent int, duration time.Duration, storageClass string) (SpeedtestResult, error) {
+	concurrent int, duration time.Duration, storageClass string,
+) (SpeedtestResult, error) {
 	values := make(url.Values)
 	values.Set(peerRESTSize, strconv.Itoa(size))
 	values.Set(peerRESTConcurrent, strconv.Itoa(concurrent))

@@ -581,7 +581,8 @@ func buildGenericPolicy(t time.Time, accessKey, region, bucketName, objectName s
 }
 
 func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string, region string,
-	t time.Time, policy []byte, addFormData map[string]string, corruptedB64 bool, corruptedMultipart bool) (*http.Request, error) {
+	t time.Time, policy []byte, addFormData map[string]string, corruptedB64 bool, corruptedMultipart bool,
+) (*http.Request, error) {
 	// Get the user credential.
 	credStr := getCredentialString(accessKey, region, t)
 

@@ -699,7 +699,8 @@ func jsonLoad(r io.ReadSeeker, data interface{}) error {
 func jsonSave(f interface {
 	io.WriteSeeker
 	Truncate(int64) error
-}, data interface{}) error {
+}, data interface{},
+) error {
 	b, err := json.Marshal(data)
 	if err != nil {
 		return err

@@ -114,7 +114,8 @@ func RegisterNotificationTargets(ctx context.Context, cfg config.Config, transpo
 
 func fetchSubSysTargets(ctx context.Context, cfg config.Config,
 	transport *http.Transport, test bool, returnOnTargetError bool,
-	subSys string, targetList *event.TargetList) (targetsOffline bool, err error) {
+	subSys string, targetList *event.TargetList,
+) (targetsOffline bool, err error) {
 	targetsOffline = false
 	if err := checkValidNotificationKeysForSubSys(subSys, cfg[subSys]); err != nil {
 		return targetsOffline, err
@@ -1479,7 +1480,8 @@ var (
 
 // GetNotifyWebhook - returns a map of registered notification 'webhook' targets
 func GetNotifyWebhook(webhookKVS map[string]config.KVS, transport *http.Transport) (
-	map[string]target.WebhookArgs, error) {
+	map[string]target.WebhookArgs, error,
+) {
 	webhookTargets := make(map[string]target.WebhookArgs)
 	for k, kv := range config.Merge(webhookKVS, target.EnvWebhookEnable, DefaultWebhookKVS) {
 		enableEnv := target.EnvWebhookEnable

@@ -15,7 +15,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-//go:build (linux && arm) || (linux && ppc64) || (linux && ppc64le) || (linux && s390x)|| (linux && riscv64)
+//go:build (linux && arm) || (linux && ppc64) || (linux && ppc64le) || (linux && s390x) || (linux && riscv64)
 // +build linux,arm linux,ppc64 linux,ppc64le linux,s390x linux,riscv64
 
 package kernel
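
This last hunk is the one non-signature change in the commit: it normalizes spacing inside the //go:build expression, adding the space that was missing before the final ||. Since Go 1.17, //go:build is the canonical constraint syntax and the legacy // +build line is kept in sync with it for older toolchains; && and || in the new syntax correspond to comma (AND) and space (OR) in the old one. A small illustration with a hypothetical platform list and package name:

//go:build (linux && arm64) || (linux && amd64)
// +build linux,arm64 linux,amd64

// Package constraintdemo builds only on the listed GOOS/GOARCH pairs.
// gofmt (and therefore gofumpt) formats the //go:build expression and
// keeps the legacy // +build line consistent with it.
package constraintdemo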