@@ -175,7 +175,7 @@ func TestLeaseGrantKeepAliveOnce(t *testing.T) {
175
175
176
176
for _ , tc := range clusterTestCases () {
177
177
t .Run (tc .name , func (t * testing.T ) {
178
- ctx , cancel := context .WithTimeout (context .Background (), 10 * time .Second )
178
+ ctx , cancel := context .WithTimeout (context .Background (), 15 * time .Second )
179
179
defer cancel ()
180
180
clus := testRunner .NewCluster (ctx , t , config .WithClusterConfig (tc .config ))
181
181
defer clus .Close ()
@@ -188,7 +188,23 @@ func TestLeaseGrantKeepAliveOnce(t *testing.T) {
188
188
_ , err = cc .KeepAliveOnce (ctx , leaseResp .ID )
189
189
require .NoError (t , err )
190
190
191
- time .Sleep (2 * time .Second ) // Wait for the original lease to expire
191
+ // FIXME: When leader changes, old leader steps
192
+ // back to a follower and ignores the lease revocation.
193
+ // The new leader will restart TTL counting. If so,
194
+ // we should call time.Sleep again and wait for the revocation.
195
+ // It can't eliminate flakiness, but it reduces the possibility of flakes.
196
+ for i := 0 ; i < 3 ; i ++ {
197
+ currentLeader := clus .WaitLeader (t )
198
+ t .Logf ("[%d] current leader index %d" , i , currentLeader )
199
+
200
+ time .Sleep (2 * time .Second )
201
+
202
+ newLeader := clus .WaitLeader (t )
203
+ if newLeader == currentLeader {
204
+ break
205
+ }
206
+ t .Logf ("[%d] leader changed, new leader index %d" , i , newLeader )
207
+ }
192
208
193
209
ttlResp , err := cc .TimeToLive (ctx , leaseResp .ID , config.LeaseOption {})
194
210
require .NoError (t , err )
0 commit comments