Try to reproduce etcd-io#19179
Signed-off-by: Marek Siarkowicz <[email protected]>
serathius committed Jan 16, 2025
1 parent 9eb85ee commit 54813cf
Showing 2 changed files with 15 additions and 178 deletions.
186 changes: 12 additions & 174 deletions tests/robustness/scenarios/scenarios.go
@@ -15,20 +15,11 @@
 package scenarios
 
 import (
-    "path/filepath"
     "testing"
-    "time"
 
-    "github.com/stretchr/testify/require"
-
-    "go.etcd.io/etcd/api/v3/version"
-    "go.etcd.io/etcd/client/pkg/v3/fileutil"
-    "go.etcd.io/etcd/server/v3/etcdserver"
     "go.etcd.io/etcd/tests/v3/framework/e2e"
     "go.etcd.io/etcd/tests/v3/robustness/client"
     "go.etcd.io/etcd/tests/v3/robustness/failpoint"
-    "go.etcd.io/etcd/tests/v3/robustness/options"
-    "go.etcd.io/etcd/tests/v3/robustness/random"
     "go.etcd.io/etcd/tests/v3/robustness/traffic"
 )

@@ -70,175 +61,22 @@ type TestScenario struct {
     Watch     client.WatchConfig
 }
 
-func Exploratory(_ *testing.T) []TestScenario {
-    randomizableOptions := []e2e.EPClusterOption{
-        options.WithClusterOptionGroups(
-            options.ClusterOptions{options.WithTickMs(29), options.WithElectionMs(271)},
-            options.ClusterOptions{options.WithTickMs(101), options.WithElectionMs(521)},
-            options.ClusterOptions{options.WithTickMs(100), options.WithElectionMs(2000)}),
-    }
-
-    mixedVersionOptionChoices := []random.ChoiceWeight[options.ClusterOptions]{
-        // 60% with all members of current version
-        {Choice: options.ClusterOptions{options.WithVersion(e2e.CurrentVersion)}, Weight: 60},
-        // 10% with 2 members of current version, 1 member last version, leader is current version
-        {Choice: options.ClusterOptions{options.WithVersion(e2e.MinorityLastVersion), options.WithInitialLeaderIndex(0)}, Weight: 10},
-        // 10% with 2 members of current version, 1 member last version, leader is last version
-        {Choice: options.ClusterOptions{options.WithVersion(e2e.MinorityLastVersion), options.WithInitialLeaderIndex(2)}, Weight: 10},
-        // 10% with 2 members of last version, 1 member current version, leader is last version
-        {Choice: options.ClusterOptions{options.WithVersion(e2e.QuorumLastVersion), options.WithInitialLeaderIndex(0)}, Weight: 10},
-        // 10% with 2 members of last version, 1 member current version, leader is current version
-        {Choice: options.ClusterOptions{options.WithVersion(e2e.QuorumLastVersion), options.WithInitialLeaderIndex(2)}, Weight: 10},
-    }
-    mixedVersionOption := options.WithClusterOptionGroups(random.PickRandom[options.ClusterOptions](mixedVersionOptionChoices))
-
-    baseOptions := []e2e.EPClusterOption{
-        options.WithSnapshotCount(50, 100, 1000),
-        options.WithSubsetOptions(randomizableOptions...),
-        e2e.WithGoFailEnabled(true),
-        // Set low minimal compaction batch limit to allow for triggering multi batch compaction failpoints.
-        options.WithCompactionBatchLimit(10, 100, 1000),
-        e2e.WithWatchProcessNotifyInterval(100 * time.Millisecond),
-    }
-
-    if e2e.CouldSetSnapshotCatchupEntries(e2e.BinPath.Etcd) {
-        baseOptions = append(baseOptions, options.WithSnapshotCatchUpEntries(100, etcdserver.DefaultSnapshotCatchUpEntries))
-    }
-    scenarios := []TestScenario{}
-    for _, tp := range trafficProfiles {
-        name := filepath.Join(tp.Name, "ClusterOfSize1")
-        clusterOfSize1Options := baseOptions
-        clusterOfSize1Options = append(clusterOfSize1Options, e2e.WithClusterSize(1))
-        scenarios = append(scenarios, TestScenario{
-            Name:    name,
-            Traffic: tp.Traffic,
-            Profile: tp.Profile,
-            Cluster: *e2e.NewConfig(clusterOfSize1Options...),
-        })
-    }
-
-    for _, tp := range trafficProfiles {
-        name := filepath.Join(tp.Name, "ClusterOfSize3")
-        clusterOfSize3Options := baseOptions
-        clusterOfSize3Options = append(clusterOfSize3Options, e2e.WithIsPeerTLS(true))
-        clusterOfSize3Options = append(clusterOfSize3Options, e2e.WithPeerProxy(true))
-        if fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
-            clusterOfSize3Options = append(clusterOfSize3Options, mixedVersionOption)
-        }
-        scenarios = append(scenarios, TestScenario{
-            Name:    name,
-            Traffic: tp.Traffic,
-            Profile: tp.Profile,
-            Cluster: *e2e.NewConfig(clusterOfSize3Options...),
-        })
-    }
-    if e2e.BinPath.LazyFSAvailable() {
-        newScenarios := scenarios
-        for _, s := range scenarios {
-            // LazyFS increases the load on CPU, so we run it with more lightweight case.
-            if s.Profile.MinimalQPS <= 100 && s.Cluster.ClusterSize == 1 {
-                lazyfsCluster := s.Cluster
-                lazyfsCluster.LazyFSEnabled = true
-                newScenarios = append(newScenarios, TestScenario{
-                    Name:      filepath.Join(s.Name, "LazyFS"),
-                    Failpoint: s.Failpoint,
-                    Cluster:   lazyfsCluster,
-                    Traffic:   s.Traffic,
-                    Profile:   s.Profile.WithoutCompaction(),
-                    Watch:     s.Watch,
-                })
-            }
-        }
-        scenarios = newScenarios
-    }
+func Exploratory(_ *testing.T) (scenarios []TestScenario) {
     return scenarios
 }
 
-func Regression(t *testing.T) []TestScenario {
-    v, err := e2e.GetVersionFromBinary(e2e.BinPath.Etcd)
-    require.NoErrorf(t, err, "Failed checking etcd version binary, binary: %q", e2e.BinPath.Etcd)
-
-    scenarios := []TestScenario{}
-    scenarios = append(scenarios, TestScenario{
-        Name:      "Issue14370",
-        Failpoint: failpoint.RaftBeforeSavePanic,
-        Profile:   traffic.LowTraffic,
-        Traffic:   traffic.EtcdPutDeleteLease,
-        Cluster: *e2e.NewConfig(
-            e2e.WithClusterSize(1),
-            e2e.WithGoFailEnabled(true),
-        ),
-    })
-    scenarios = append(scenarios, TestScenario{
-        Name:      "Issue14685",
-        Failpoint: failpoint.DefragBeforeCopyPanic,
-        Profile:   traffic.LowTraffic,
-        Traffic:   traffic.EtcdPutDeleteLease,
-        Cluster: *e2e.NewConfig(
-            e2e.WithClusterSize(1),
-            e2e.WithGoFailEnabled(true),
-        ),
-    })
-    scenarios = append(scenarios, TestScenario{
-        Name:      "Issue13766",
-        Failpoint: failpoint.KillFailpoint,
-        Profile:   traffic.HighTrafficProfile,
-        Traffic:   traffic.EtcdPut,
-        Cluster: *e2e.NewConfig(
-            e2e.WithSnapshotCount(100),
-        ),
-    })
-    scenarios = append(scenarios, TestScenario{
-        Name: "Issue15220",
-        Watch: client.WatchConfig{
-            RequestProgress: true,
-        },
-        Profile:   traffic.LowTraffic,
-        Traffic:   traffic.EtcdPutDeleteLease,
-        Failpoint: failpoint.KillFailpoint,
-        Cluster: *e2e.NewConfig(
-            e2e.WithClusterSize(1),
-        ),
-    })
-    scenarios = append(scenarios, TestScenario{
-        Name:      "Issue17529",
-        Profile:   traffic.HighTrafficProfile,
-        Traffic:   traffic.Kubernetes,
-        Failpoint: failpoint.SleepBeforeSendWatchResponse,
-        Cluster: *e2e.NewConfig(
-            e2e.WithClusterSize(1),
-            e2e.WithGoFailEnabled(true),
-            options.WithSnapshotCount(100),
-        ),
-    })
-
-    scenarios = append(scenarios, TestScenario{
-        Name:      "Issue17780",
-        Profile:   traffic.LowTraffic.WithoutCompaction(),
-        Failpoint: failpoint.BatchCompactBeforeSetFinishedCompactPanic,
-        Traffic:   traffic.Kubernetes,
-        Cluster: *e2e.NewConfig(
-            e2e.WithClusterSize(1),
-            e2e.WithCompactionBatchLimit(300),
-            e2e.WithSnapshotCount(1000),
-            e2e.WithGoFailEnabled(true),
-        ),
-    })
-    if v.Compare(version.V3_5) >= 0 {
-        opts := []e2e.EPClusterOption{
-            e2e.WithSnapshotCount(100),
-            e2e.WithPeerProxy(true),
-            e2e.WithIsPeerTLS(true),
-        }
-        if e2e.CouldSetSnapshotCatchupEntries(e2e.BinPath.Etcd) {
-            opts = append(opts, e2e.WithSnapshotCatchUpEntries(100))
-        }
+func Regression(t *testing.T) (scenarios []TestScenario) {
+    for i := 0; i < 20; i++ {
         scenarios = append(scenarios, TestScenario{
-            Name:      "Issue15271",
-            Failpoint: failpoint.BlackholeUntilSnapshot,
-            Profile:   traffic.HighTrafficProfile,
-            Traffic:   traffic.EtcdPut,
-            Cluster:   *e2e.NewConfig(opts...),
+            Name:      "Issue19179",
+            Profile:   traffic.LowTraffic.WithoutCompaction(),
+            Failpoint: failpoint.BatchCompactBeforeSetFinishedCompactPanic,
+            Traffic:   traffic.Kubernetes,
+            Cluster: *e2e.NewConfig(
+                e2e.WithClusterSize(1),
+                e2e.WithCompactionBatchLimit(100),
+                e2e.WithGoFailEnabled(true),
+            ),
         })
     }
     return scenarios
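For context, the lists these functions return are consumed by the robustness test runner. Below is a minimal sketch of that shape — the test name TestRobustnessRegression and the subtest body are assumptions for illustration, not the suite's actual runner, which also provisions the cluster, injects the failpoint, and validates the recorded history. Note that the loop above appends 20 scenarios all named "Issue19179"; Go's t.Run disambiguates duplicate subtest names by suffixing #01, #02, and so on.

package robustness_test

import (
    "testing"

    "go.etcd.io/etcd/tests/v3/robustness/scenarios"
)

// Hypothetical consumer sketch: run each generated scenario as a subtest.
func TestRobustnessRegression(t *testing.T) {
    for _, s := range scenarios.Regression(t) {
        t.Run(s.Name, func(t *testing.T) {
            // The real runner would start an e2e cluster from s.Cluster,
            // enable s.Failpoint, drive s.Traffic at s.Profile, and check
            // correctness; this stub only demonstrates the iteration shape.
            t.Log("scenario:", s.Name)
        })
    }
}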
7 changes: 3 additions & 4 deletions tests/robustness/traffic/kubernetes.go
@@ -34,14 +34,13 @@ import (
 )
 
 var Kubernetes Traffic = kubernetesTraffic{
-    averageKeyCount: 10,
+    averageKeyCount: 100,
     resource:        "pods",
     namespace:       "default",
     // Please keep the sum of weights equal 100.
     writeChoices: []random.ChoiceWeight[KubernetesRequestType]{
-        {Choice: KubernetesUpdate, Weight: 90},
-        {Choice: KubernetesDelete, Weight: 5},
-        {Choice: KubernetesCreate, Weight: 5},
+        {Choice: KubernetesDelete, Weight: 50},
+        {Choice: KubernetesCreate, Weight: 50},
     },
 }

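The writeChoices weights feed a weighted random pick of the next write operation, which is why the comment asks that they sum to 100. Here is a minimal self-contained sketch of weight-proportional selection — the local ChoiceWeight type and pickRandom helper are stand-ins for illustration, not the definitions in go.etcd.io/etcd/tests/v3/robustness/random:

package main

import (
    "fmt"
    "math/rand"
)

// ChoiceWeight mirrors the shape used above; this local copy is an
// assumption for illustration only.
type ChoiceWeight[T any] struct {
    Choice T
    Weight int
}

// pickRandom selects a Choice with probability proportional to its Weight.
// With weights summing to 100, a Weight of 50 means a 50% chance.
func pickRandom[T any](choices []ChoiceWeight[T]) T {
    total := 0
    for _, c := range choices {
        total += c.Weight
    }
    r := rand.Intn(total)
    for _, c := range choices {
        r -= c.Weight
        if r < 0 {
            return c.Choice
        }
    }
    return choices[len(choices)-1].Choice // unreachable for positive weights
}

func main() {
    // After this commit: deletes and creates are equally likely.
    writeChoices := []ChoiceWeight[string]{
        {Choice: "delete", Weight: 50},
        {Choice: "create", Weight: 50},
    }
    fmt.Println(pickRandom(writeChoices))
}

Shifting from 90% updates to a 50/50 split of deletes and creates, together with the tenfold higher averageKeyCount, churns keys much faster — presumably to produce the revision patterns needed to trigger the compaction failpoint used in the scenario above.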
