Mirror of https://github.com/gravitational/teleport (synced 2024-10-19 00:33:50 +00:00)
Replace testify/assert with testify/require (#9925)
parent 61d2ae52d2
commit 191d631c38
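For context on the change: testify's assert functions mark a test as failed and let it keep running, while their require counterparts additionally call FailNow() and stop the test on the spot. In setup code and benchmarks, carrying on after a failed precondition tends to produce cascading, confusing failures, which is why this PR swaps one for the other. A minimal sketch of the behavioral difference (doSomething is a hypothetical stand-in, not code from this commit):

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// doSomething is a hypothetical stand-in for the operation under test.
func doSomething() error { return errors.New("boom") }

func TestAssertVsRequire(t *testing.T) {
	err := doSomething()

	assert.NoError(t, err)  // marks the test failed, then execution continues
	require.NoError(t, err) // marks the test failed and calls t.FailNow()
}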
@@ -27,7 +27,6 @@ import (
 	"github.com/gravitational/trace"

 	"github.com/jonboulle/clockwork"
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	adminpb "google.golang.org/genproto/googleapis/firestore/admin/v1"
 	"google.golang.org/protobuf/proto"
@@ -46,10 +45,10 @@ func TestMain(m *testing.M) {
 func TestMarshal(t *testing.T) {
 	meta := adminpb.IndexOperationMetadata{}
 	data, err := proto.Marshal(&meta)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	out := adminpb.IndexOperationMetadata{}
 	err = proto.Unmarshal(data, &out)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 }

 func firestoreParams() backend.Params {
@@ -30,7 +30,7 @@ import (
 	"github.com/gravitational/teleport/lib/backend/memory"
 	"github.com/gravitational/teleport/lib/services"

-	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 // BenchmarkGetNodes verifies the performance of the GetNodes operation
@@ -68,16 +68,16 @@ func BenchmarkGetNodes(b *testing.B) {
 			var err error
 			if tt.memory {
 				bk, err = memory.New(memory.Config{})
-				assert.NoError(b, err)
+				require.NoError(b, err)
 			} else {
 				dir, err := ioutil.TempDir("", "teleport")
-				assert.NoError(b, err)
+				require.NoError(b, err)
 				defer os.RemoveAll(dir)

 				bk, err = lite.NewWithConfig(context.TODO(), lite.Config{
 					Path: dir,
 				})
-				assert.NoError(b, err)
+				require.NoError(b, err)
 			}
 			defer bk.Close()

@@ -95,7 +95,7 @@ func BenchmarkGetNodes(b *testing.B) {
 }

 // insertNodes inserts a collection of test nodes into a backend.
-func insertNodes(ctx context.Context, t assert.TestingT, svc services.Presence, nodeCount int) {
+func insertNodes(ctx context.Context, b *testing.B, svc services.Presence, nodeCount int) {
 	const labelCount = 10
 	labels := make(map[string]string, labelCount)
 	for i := 0; i < labelCount; i++ {
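A note on the signature change in the hunk above: the helper previously accepted assert.TestingT, which only requires an Errorf method. require must be able to abort the running test, so its functions take an interface that also demands FailNow(), which assert.TestingT does not provide; passing the concrete *testing.B, which has both methods, is the simplest fix. Paraphrased from testify's exported interfaces (the names below are illustrative; in testify both are called TestingT in their respective packages):

package example

// Counterpart of assert.TestingT: being able to report a failure is enough,
// since the test keeps running afterwards.
type assertTestingT interface {
	Errorf(format string, args ...interface{})
}

// Counterpart of require.TestingT: the extra FailNow method is what lets
// require abort the test immediately. *testing.T and *testing.B implement
// both interfaces; assert.TestingT does not satisfy this one.
type requireTestingT interface {
	Errorf(format string, args ...interface{})
	FailNow()
}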
@@ -117,7 +117,7 @@ func insertNodes(ctx context.Context, t assert.TestingT, svc services.Presence,
 			},
 		}
 		_, err := svc.UpsertNode(ctx, node)
-		assert.NoError(t, err)
+		require.NoError(b, err)
 	}
 }

@@ -127,10 +127,10 @@ func benchmarkGetNodes(ctx context.Context, b *testing.B, svc services.Presence,
 	var err error
 	for i := 0; i < b.N; i++ {
 		nodes, err = svc.GetNodes(ctx, apidefaults.Namespace, opts...)
-		assert.NoError(b, err)
+		require.NoError(b, err)
 	}
 	// do *something* with the loop result. probably unnecessary since the loop
 	// contains I/O, but I don't know enough about the optimizer to be 100% certain
 	// about that.
-	assert.Equal(b, nodeCount, len(nodes))
+	require.Equal(b, nodeCount, len(nodes))
 }
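The "do *something* with the loop result" comment above points at a real benchmarking concern: if a result is never observed, the compiler is in principle free to optimize the measured work away. A common defensive idiom, shown here as a generic sketch rather than anything from this commit (computeSomething is hypothetical), is to publish the result to a package-level sink:

package example

import "testing"

// sink is package-level, so the compiler must treat stores to it as observable.
var sink int

// computeSomething is a hypothetical stand-in for the work being measured.
func computeSomething() int { return 42 }

func BenchmarkCompute(b *testing.B) {
	var n int
	for i := 0; i < b.N; i++ {
		n = computeSomething()
	}
	sink = n // publish the final result so the loop body is observably live
}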
@@ -26,7 +26,6 @@ import (

 	"github.com/jackc/pgconn"
 	"github.com/siddontang/go-mysql/client"
-	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"go.mongodb.org/mongo-driver/mongo"
 )
@@ -120,7 +119,7 @@ func TestDatabaseServerLimiting(t *testing.T) {
 		// We keep the previous connections open, so this one should be rejected, because we exhausted the limit.
 		_, err = testCtx.postgresClient(ctx, user, "postgres", dbUser, dbName)
 		require.Error(t, err)
-		assert.Contains(t, err.Error(), "exceeded connection limit")
+		require.Contains(t, err.Error(), "exceeded connection limit")
 	})

 	t.Run("mysql", func(t *testing.T) {
@@ -144,7 +143,7 @@ func TestDatabaseServerLimiting(t *testing.T) {
 		// We keep the previous connections open, so this one should be rejected, because we exhausted the limit.
 		_, err = testCtx.mysqlClient(user, "mysql", dbUser)
 		require.Error(t, err)
-		assert.Contains(t, err.Error(), "exceeded connection limit")
+		require.Contains(t, err.Error(), "exceeded connection limit")
 	})

 	t.Run("mongodb", func(t *testing.T) {
@@ -168,7 +167,7 @@ func TestDatabaseServerLimiting(t *testing.T) {
 				continue
 			}

-			assert.Contains(t, err.Error(), "exceeded connection limit")
+			require.Contains(t, err.Error(), "exceeded connection limit")
 			// When we hit the expected error we can exit.
 			return
 		}
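The database tests above pair require.Error with a Contains check on err.Error(); require.Error running first is what makes the later err.Error() call safe. Newer testify releases also provide ErrorContains, which folds both steps into a single nil-safe assertion, roughly like this (a sketch assuming a testify version that ships ErrorContains, with a fabricated error value):

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestErrorContains(t *testing.T) {
	err := errors.New("connection rejected: exceeded connection limit")

	// One call replaces the require.Error + require.Contains pair;
	// it fails cleanly (rather than panicking on err.Error()) when err is nil.
	require.ErrorContains(t, err, "exceeded connection limit")
}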
@@ -36,7 +36,7 @@ import (
 	"github.com/gravitational/teleport/lib/services/local"

 	"github.com/google/uuid"
-	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 const clusterName = "bench.example.com"
@@ -77,16 +77,16 @@ func BenchmarkGetClusterDetails(b *testing.B) {
 			var err error
 			if tt.memory {
 				bk, err = memory.New(memory.Config{})
-				assert.NoError(b, err)
+				require.NoError(b, err)
 			} else {
 				dir, err := ioutil.TempDir("", "teleport")
-				assert.NoError(b, err)
+				require.NoError(b, err)
 				defer os.RemoveAll(dir)

 				bk, err = lite.NewWithConfig(context.TODO(), lite.Config{
 					Path: dir,
 				})
-				assert.NoError(b, err)
+				require.NoError(b, err)
 			}
 			defer bk.Close()

@@ -113,7 +113,7 @@ func BenchmarkGetClusterDetails(b *testing.B) {
 }

 // insertServers inserts a collection of servers into a backend.
-func insertServers(ctx context.Context, t assert.TestingT, svc services.Presence, kind string, count int) {
+func insertServers(ctx context.Context, b *testing.B, svc services.Presence, kind string, count int) {
 	const labelCount = 10
 	labels := make(map[string]string, labelCount)
 	for i := 0; i < labelCount; i++ {
@@ -145,9 +145,9 @@ func insertServers(ctx context.Context, t assert.TestingT, svc services.Presence
 		case types.KindAuthServer:
 			err = svc.UpsertAuthServer(server)
 		default:
-			t.Errorf("Unexpected server kind: %s", kind)
+			b.Errorf("Unexpected server kind: %s", kind)
 		}
-		assert.NoError(t, err)
+		require.NoError(b, err)
 	}
 }

@@ -156,10 +156,10 @@ func benchmarkGetClusterDetails(ctx context.Context, b *testing.B, site reverset
 	var err error
 	for i := 0; i < b.N; i++ {
 		cluster, err = GetClusterDetails(ctx, site, opts...)
-		assert.NoError(b, err)
+		require.NoError(b, err)
 	}
-	assert.NotNil(b, cluster)
-	assert.Equal(b, nodes, cluster.NodeCount)
+	require.NotNil(b, cluster)
+	require.Equal(b, nodes, cluster.NodeCount)
 }

 type mockRemoteSite struct {