update gofumpt -w - new changes

Harshavardhana 2022-04-13 12:00:11 -07:00
parent 7ce1f6e736
commit eda34423d7
21 changed files with 57 additions and 29 deletions
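Every hunk below is the same mechanical rewrite: the newer gofumpt splits multi-line function signatures so that the last parameter ends with a trailing comma and the closing parenthesis, followed by any return values, moves down to its own line. The final hunk additionally picks up a spacing fix inside a //go:build constraint. A minimal before/after sketch of the signature rule, using a made-up function rather than anything from the MinIO tree:

package main

import "fmt"

// Before `gofumpt -w`, the closing parenthesis hugged the last parameter:
//
//	func report(name string, count int,
//		verbose bool) string {
//
// After `gofumpt -w`, the last parameter gains a trailing comma and the
// closing parenthesis moves to its own line, matching the hunks below.
func report(name string, count int,
	verbose bool,
) string {
	if verbose {
		return fmt.Sprintf("%s: %d items", name, count)
	}
	return name
}

func main() {
	fmt.Println(report("buckets", 21, true))
}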


@@ -97,7 +97,8 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
 }

 func applyDynamic(ctx context.Context, objectAPI ObjectLayer, cfg config.Config, subSys string,
-	r *http.Request, w http.ResponseWriter) {
+	r *http.Request, w http.ResponseWriter,
+) {
 	// Apply dynamic values.
 	if err := applyDynamicConfigForSubSys(GlobalContext, objectAPI, cfg, subSys); err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)


@@ -341,7 +341,8 @@ func mustNewSignedEmptyMD5Request(method string, urlStr string, contentLength in
 }

 func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int64,
-	body io.ReadSeeker, t *testing.T) *http.Request {
+	body io.ReadSeeker, t *testing.T,
+) *http.Request {
 	req := mustNewRequest(method, urlStr, contentLength, body, t)
 	req.Header.Set("Content-Md5", "YWFhYWFhYWFhYWFhYWFhCg==")
 	cred := globalActiveCred


@@ -36,7 +36,8 @@ func TestRemoveBucketHandler(t *testing.T) {
 }

 func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewReader([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
 	// if object upload fails stop the test.
 	if err != nil {


@@ -34,7 +34,8 @@ func TestBucketLifecycleWrongCredentials(t *testing.T) {

 // Test for authentication
 func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	// test cases with sample input and expected output.
 	testCases := []struct {
 		method string


@@ -113,7 +113,8 @@ func TestCreateBucket(t *testing.T) {

 // testCreateBucket - Test for calling Create Bucket and ensure we get one and only one success.
 func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	bucketName1 := fmt.Sprintf("%s-1", bucketName)

 	const n = 100
@@ -378,7 +379,8 @@ func TestGetBucketPolicyHandler(t *testing.T) {

 // testGetBucketPolicyHandler - Test for end point which fetches the access policy json of the given bucket.
 func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	// template for constructing HTTP request body for PUT bucket policy.
 	bucketPolicyTemplate := `{"Version":"2012-10-17","Statement":[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s"]},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s/this*"]}]}`


@@ -60,7 +60,8 @@ func (fi FileInfo) DataShardFixed() bool {
 // also heals the missing entries for bucket metadata files
 // `policy.json, notification.xml, listeners.json`.
 func (er erasureObjects) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (
-	result madmin.HealResultItem, err error) {
+	result madmin.HealResultItem, err error,
+) {
 	if !opts.DryRun {
 		defer NSUpdated(bucket, slashSeparator)
 	}


@@ -1099,7 +1099,8 @@ func (s *erasureSets) NewMultipartUpload(ctx context.Context, bucket, object str

 // Copies a part of an object from source hashedSet to destination hashedSet.
 func (s *erasureSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
-	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (partInfo PartInfo, err error) {
+	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions,
+) (partInfo PartInfo, err error) {
 	destSet := s.getHashedSet(destObject)
 	auditObjectErasureSet(ctx, destObject, destSet)
 	return destSet.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, NewPutObjReader(srcInfo.Reader), dstOpts)


@@ -1362,13 +1362,15 @@ func (fs *FSObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealRe

 // HealObject - no-op for fs. Valid only for Erasure.
 func (fs *FSObjects) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (
-	res madmin.HealResultItem, err error) {
+	res madmin.HealResultItem, err error,
+) {
 	return res, NotImplemented{}
 }

 // HealBucket - no-op for fs, Valid only for Erasure.
 func (fs *FSObjects) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem,
-	error) {
+	error,
+) {
 	return madmin.HealResultItem{}, NotImplemented{}
 }


@@ -211,7 +211,8 @@ func (a GatewayUnsupported) HealObjects(ctx context.Context, bucket, prefix stri

 // CopyObject copies a blob from source container to destination container.
 func (a GatewayUnsupported) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
-	srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (objInfo ObjectInfo, err error) {
+	srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions,
+) (objInfo ObjectInfo, err error) {
 	return objInfo, NotImplemented{}
 }


@@ -1072,7 +1072,8 @@ func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object st
 }

 func (a *azureObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, uploadID string, partID int,
-	startOffset int64, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (info minio.PartInfo, err error) {
+	startOffset int64, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions,
+) (info minio.PartInfo, err error) {
 	return a.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.PutObjReader, dstOpts)
 }


@@ -929,7 +929,8 @@ func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r

 // CopyObject - Copies a blob from source container to destination container.
 func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
-	srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) {
+	srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions,
+) (minio.ObjectInfo, error) {
 	if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
 		return minio.ObjectInfo{}, minio.PreConditionFailed{}
 	}


@@ -536,7 +536,8 @@ func (n *hdfsObjects) deleteObject(basePath, deletePath string) error {

 // ListObjectsV2 lists all blobs in HDFS bucket filtered by prefix
 func (n *hdfsObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int,
-	fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) {
+	fetchOwner bool, startAfter string,
+) (loi minio.ListObjectsV2Info, err error) {
 	// fetchOwner is not supported and unused.
 	marker := continuationToken
 	if marker == "" {
@@ -793,7 +794,8 @@ func (n *hdfsObjects) ListObjectParts(ctx context.Context, bucket, object, uploa
 }

 func (n *hdfsObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int,
-	startOffset int64, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.PartInfo, error) {
+	startOffset int64, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions,
+) (minio.PartInfo, error) {
 	return n.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.PutObjReader, dstOpts)
 }


@@ -549,7 +549,8 @@ func (l *s3EncObjects) PutObjectPart(ctx context.Context, bucket string, object
 // CopyObjectPart creates a part in a multipart upload by copying
 // existing object or a part of it.
 func (l *s3EncObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
-	partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (p minio.PartInfo, err error) {
+	partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions,
+) (p minio.PartInfo, err error) {
 	return l.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, srcInfo.PutObjReader, dstOpts)
 }


@@ -640,7 +640,8 @@ func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object str
 // CopyObjectPart creates a part in a multipart upload by copying
 // existing object or a part of it.
 func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
-	partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (p minio.PartInfo, err error) {
+	partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions,
+) (p minio.PartInfo, err error) {
 	if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
 		return minio.PartInfo{}, minio.PreConditionFailed{}
 	}


@@ -1608,7 +1608,8 @@ func (sys *NotificationSys) Netperf(ctx context.Context, duration time.Duration)
 // Speedtest run GET/PUT tests at input concurrency for requested object size,
 // optionally you can extend the tests longer with time.Duration.
 func (sys *NotificationSys) Speedtest(ctx context.Context, size int,
-	concurrent int, duration time.Duration, storageClass string) []SpeedtestResult {
+	concurrent int, duration time.Duration, storageClass string,
+) []SpeedtestResult {
 	length := len(sys.allPeerClients)
 	if length == 0 {
 		// For single node erasure setup.


@@ -62,7 +62,8 @@ func TestAPIHeadObjectHandler(t *testing.T) {
 }

 func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	objectName := "test-object"
 	// set of byte data for PutObject.
 	// object has to be created before running tests for HeadObject.
@@ -2592,7 +2593,8 @@ func TestAPINewMultipartHandlerParallel(t *testing.T) {
 }

 func testAPINewMultipartHandlerParallel(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	// used for storing the uploadID's parsed on concurrent HTTP requests for NewMultipart upload on the same object.
 	testUploads := struct {
 		sync.Mutex
@@ -3347,7 +3349,8 @@ func TestAPIPutObjectPartHandlerStreaming(t *testing.T) {
 }

 func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	testObject := "testobject"
 	rec := httptest.NewRecorder()
 	req, err := newTestSignedRequestV4(http.MethodPost, getNewMultipartURL("", bucketName, "testobject"),
@@ -3748,7 +3751,8 @@ func TestAPIListObjectPartsHandlerPreSign(t *testing.T) {
 }

 func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	testObject := "testobject"
 	rec := httptest.NewRecorder()
 	req, err := newTestSignedRequestV4(http.MethodPost, getNewMultipartURL("", bucketName, testObject),
@@ -3835,7 +3839,8 @@ func TestAPIListObjectPartsHandler(t *testing.T) {
 }

 func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
-	credentials auth.Credentials, t *testing.T) {
+	credentials auth.Credentials, t *testing.T,
+) {
 	testObject := "testobject"
 	var opts ObjectOptions
 	// PutObjectPart API HTTP Handler has to be tested in isolation,


@@ -1039,7 +1039,8 @@ func (client *peerRESTClient) GetPeerMetrics(ctx context.Context) (<-chan Metric
 }

 func (client *peerRESTClient) Speedtest(ctx context.Context, size,
-	concurrent int, duration time.Duration, storageClass string) (SpeedtestResult, error) {
+	concurrent int, duration time.Duration, storageClass string,
+) (SpeedtestResult, error) {
 	values := make(url.Values)
 	values.Set(peerRESTSize, strconv.Itoa(size))
 	values.Set(peerRESTConcurrent, strconv.Itoa(concurrent))


@@ -581,7 +581,8 @@ func buildGenericPolicy(t time.Time, accessKey, region, bucketName, objectName s
 }

 func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData []byte, accessKey, secretKey string, region string,
-	t time.Time, policy []byte, addFormData map[string]string, corruptedB64 bool, corruptedMultipart bool) (*http.Request, error) {
+	t time.Time, policy []byte, addFormData map[string]string, corruptedB64 bool, corruptedMultipart bool,
+) (*http.Request, error) {
 	// Get the user credential.
 	credStr := getCredentialString(accessKey, region, t)


@@ -699,7 +699,8 @@ func jsonLoad(r io.ReadSeeker, data interface{}) error {
 func jsonSave(f interface {
 	io.WriteSeeker
 	Truncate(int64) error
-}, data interface{}) error {
+}, data interface{},
+) error {
 	b, err := json.Marshal(data)
 	if err != nil {
 		return err


@@ -114,7 +114,8 @@ func RegisterNotificationTargets(ctx context.Context, cfg config.Config, transpo

 func fetchSubSysTargets(ctx context.Context, cfg config.Config,
 	transport *http.Transport, test bool, returnOnTargetError bool,
-	subSys string, targetList *event.TargetList) (targetsOffline bool, err error) {
+	subSys string, targetList *event.TargetList,
+) (targetsOffline bool, err error) {
 	targetsOffline = false
 	if err := checkValidNotificationKeysForSubSys(subSys, cfg[subSys]); err != nil {
 		return targetsOffline, err
@@ -1479,7 +1480,8 @@ var (

 // GetNotifyWebhook - returns a map of registered notification 'webhook' targets
 func GetNotifyWebhook(webhookKVS map[string]config.KVS, transport *http.Transport) (
-	map[string]target.WebhookArgs, error) {
+	map[string]target.WebhookArgs, error,
+) {
 	webhookTargets := make(map[string]target.WebhookArgs)
 	for k, kv := range config.Merge(webhookKVS, target.EnvWebhookEnable, DefaultWebhookKVS) {
 		enableEnv := target.EnvWebhookEnable


@@ -15,7 +15,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-//go:build (linux && arm) || (linux && ppc64) || (linux && ppc64le) || (linux && s390x)|| (linux && riscv64)
+//go:build (linux && arm) || (linux && ppc64) || (linux && ppc64le) || (linux && s390x) || (linux && riscv64)
 // +build linux,arm linux,ppc64 linux,ppc64le linux,s390x linux,riscv64

 package kernel
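This last hunk is the one change in the commit that is not a signature rewrite: since Go 1.17, gofmt (and gofumpt, which builds on it) canonicalizes //go:build expressions, which adds the missing space before the || operator and keeps the legacy // +build line in sync. A tiny illustrative file header with a hypothetical package name, showing the canonical form:

//go:build (linux && arm) || (linux && riscv64)
// +build linux,arm linux,riscv64

// Package constraintdemo exists only to show a canonical build-constraint
// header: spaces around && and ||, a // +build line that matches the
// //go:build expression, and a blank line before the package clause.
package constraintdemo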