Skip to content

Commit 47540ce

Browse files
Add MemberDowngrade failpoint
Signed-off-by: Siyuan Zhang <[email protected]>
1 parent e0bbea9 commit 47540ce

File tree

4 files changed

+118
-1
lines changed

4 files changed

+118
-1
lines changed

tests/framework/e2e/curl.go

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -128,3 +128,10 @@ func CURLGet(clus *EtcdProcessCluster, req CURLReq) error {
128128

129129
return SpawnWithExpectsContext(ctx, CURLPrefixArgsCluster(clus.Cfg, clus.Procs[rand.Intn(clus.Cfg.ClusterSize)], "GET", req), nil, req.Expected)
130130
}
131+
132+
func CURLGetFromMember(clus *EtcdProcessCluster, member EtcdProcess, req CURLReq) error {
133+
ctx, cancel := context.WithTimeout(context.Background(), req.timeoutDuration())
134+
defer cancel()
135+
136+
return SpawnWithExpectsContext(ctx, CURLPrefixArgsCluster(clus.Cfg, member, "GET", req), nil, req.Expected)
137+
}

tests/robustness/failpoint/cluster.go

Lines changed: 108 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,18 +23,25 @@ import (
2323
"testing"
2424
"time"
2525

26+
"github.com/coreos/go-semver/semver"
2627
"github.com/stretchr/testify/require"
2728
"go.uber.org/zap"
2829

30+
pb "go.etcd.io/etcd/api/v3/etcdserverpb"
31+
"go.etcd.io/etcd/client/pkg/v3/fileutil"
2932
clientv3 "go.etcd.io/etcd/client/v3"
33+
"go.etcd.io/etcd/pkg/v3/expect"
3034
"go.etcd.io/etcd/server/v3/etcdserver"
3135
"go.etcd.io/etcd/tests/v3/framework/e2e"
3236
"go.etcd.io/etcd/tests/v3/robustness/identity"
3337
"go.etcd.io/etcd/tests/v3/robustness/report"
3438
"go.etcd.io/etcd/tests/v3/robustness/traffic"
3539
)
3640

37-
var MemberReplace Failpoint = memberReplace{}
41+
var (
	// MemberReplace removes a random member and adds a fresh one in its place.
	MemberReplace Failpoint = memberReplace{}
	// MemberDowngrade downgrades a random subset of members to the previous
	// minor release after enabling cluster-wide downgrade.
	MemberDowngrade Failpoint = memberDowngrade{}
)
3845

3946
type memberReplace struct{}
4047

@@ -138,6 +145,80 @@ func (f memberReplace) Available(config e2e.EtcdProcessClusterConfig, member e2e
138145
return config.ClusterSize > 1 && (config.Version == e2e.QuorumLastVersion || member.Config().ExecPath == e2e.BinPath.Etcd)
139146
}
140147

148+
type memberDowngrade struct{}
149+
150+
func (f memberDowngrade) Inject(ctx context.Context, t *testing.T, lg *zap.Logger, clus *e2e.EtcdProcessCluster, baseTime time.Time, ids identity.Provider) ([]report.ClientReport, error) {
151+
v, err := e2e.GetVersionFromBinary(e2e.BinPath.Etcd)
152+
if err != nil {
153+
return nil, err
154+
}
155+
targetVersion := semver.Version{Major: v.Major, Minor: v.Minor - 1}
156+
numberOfMembersToDowngrade := rand.Int()%len(clus.Procs) + 1
157+
membersToDowngrade := rand.Perm(len(clus.Procs))[:numberOfMembersToDowngrade]
158+
lg.Info("Test downgrading members", zap.Any("members", membersToDowngrade))
159+
160+
member := clus.Procs[0]
161+
endpoints := []string{member.EndpointsGRPC()[0]}
162+
cc, err := clientv3.New(clientv3.Config{
163+
Endpoints: endpoints,
164+
Logger: zap.NewNop(),
165+
DialKeepAliveTime: 10 * time.Second,
166+
DialKeepAliveTimeout: 100 * time.Millisecond,
167+
})
168+
if err != nil {
169+
return nil, err
170+
}
171+
defer cc.Close()
172+
173+
// Need to wait health interval for cluster to accept changes
174+
time.Sleep(etcdserver.HealthInterval)
175+
lg.Info("Enable downgrade")
176+
err = enableDowngrade(ctx, cc, &targetVersion)
177+
if err != nil {
178+
return nil, err
179+
}
180+
// Need to wait health interval for cluster to prepare for downgrade
181+
time.Sleep(etcdserver.HealthInterval)
182+
183+
for _, memberID := range membersToDowngrade {
184+
member = clus.Procs[memberID]
185+
lg.Info("Downgrading member", zap.String("member", member.Config().Name))
186+
if err = member.Stop(); err != nil {
187+
return nil, err
188+
}
189+
member.Config().ExecPath = e2e.BinPath.EtcdLastRelease
190+
err = patchArgs(member.Config().Args, "initial-cluster-state", "existing")
191+
if err != nil {
192+
return nil, err
193+
}
194+
lg.Info("Restarting member", zap.String("member", member.Config().Name))
195+
err = member.Start(ctx)
196+
if err != nil {
197+
return nil, err
198+
}
199+
err = verifyVersion(t, clus, member, targetVersion)
200+
}
201+
time.Sleep(etcdserver.HealthInterval)
202+
return nil, err
203+
}
204+
205+
// Name returns the identifier used to select this failpoint in test runs.
func (f memberDowngrade) Name() string {
	return "MemberDowngrade"
}
208+
209+
func (f memberDowngrade) Available(config e2e.EtcdProcessClusterConfig, member e2e.EtcdProcess, profile traffic.Profile) bool {
210+
if !fileutil.Exist(e2e.BinPath.EtcdLastRelease) {
211+
return false
212+
}
213+
v, err := e2e.GetVersionFromBinary(e2e.BinPath.Etcd)
214+
if err != nil {
215+
panic("Failed checking etcd version binary")
216+
}
217+
v3_6 := semver.Version{Major: 3, Minor: 6}
218+
// only current version cluster can be downgraded.
219+
return v.Compare(v3_6) >= 0 && (config.Version == e2e.CurrentVersion && member.Config().ExecPath == e2e.BinPath.Etcd)
220+
}
221+
141222
func getID(ctx context.Context, cc *clientv3.Client, name string) (id uint64, found bool, err error) {
142223
// Ensure linearized MemberList by first making a linearized Get request from the same member.
143224
// This is required for v3.4 support as it doesn't support linearized MemberList https://github.com/etcd-io/etcd/issues/18929
@@ -170,3 +251,29 @@ func patchArgs(args []string, flag, newValue string) error {
170251
}
171252
return fmt.Errorf("--%s flag not found", flag)
172253
}
254+
255+
func enableDowngrade(ctx context.Context, cc *clientv3.Client, targetVersion *semver.Version) error {
256+
_, err := cc.Maintenance.Downgrade(ctx, clientv3.DowngradeAction(pb.DowngradeRequest_VALIDATE), targetVersion.String())
257+
if err != nil {
258+
return err
259+
}
260+
_, err = cc.Maintenance.Downgrade(ctx, clientv3.DowngradeAction(pb.DowngradeRequest_ENABLE), targetVersion.String())
261+
return err
262+
}
263+
264+
func verifyVersion(t *testing.T, clus *e2e.EtcdProcessCluster, member e2e.EtcdProcess, expectedVersion semver.Version) error {
265+
var err error
266+
expected := fmt.Sprintf(`"etcdserver":"%d.%d\..*"etcdcluster":"%d\.%d\.`, expectedVersion.Major, expectedVersion.Minor, expectedVersion.Major, expectedVersion.Minor)
267+
for i := 0; i < 35; i++ {
268+
if err = e2e.CURLGetFromMember(clus, member, e2e.CURLReq{Endpoint: "/version", Expected: expect.ExpectedResponse{Value: expected, IsRegularExpr: true}}); err != nil {
269+
t.Logf("#%d: v3 is not ready yet (%v)", i, err)
270+
time.Sleep(200 * time.Millisecond)
271+
continue
272+
}
273+
break
274+
}
275+
if err != nil {
276+
return fmt.Errorf("failed to verify version, expected %v got (%w)", expected, err)
277+
}
278+
return nil
279+
}

tests/robustness/failpoint/failpoint.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@ var allFailpoints = []Failpoint{
4646
RaftBeforeSaveSnapPanic, RaftAfterSaveSnapPanic, BlackholeUntilSnapshot,
4747
BeforeApplyOneConfChangeSleep,
4848
MemberReplace,
49+
MemberDowngrade,
4950
DropPeerNetwork,
5051
RaftBeforeSaveSleep,
5152
RaftAfterSaveSleep,

tests/robustness/report/wal.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -183,6 +183,8 @@ func parseEntryNormal(ent raftpb.Entry) (*model.EtcdRequest, error) {
183183
return nil, nil
184184
case raftReq.ClusterVersionSet != nil:
185185
return nil, nil
186+
case raftReq.DowngradeInfoSet != nil:
187+
return nil, nil
186188
case raftReq.Compaction != nil:
187189
request := model.EtcdRequest{
188190
Type: model.Compact,

0 commit comments

Comments
 (0)