@@ -30,7 +30,9 @@ import (
 	"github.com/opencurve/curveadm/internal/configure/topology"
 	"github.com/opencurve/curveadm/internal/errno"
 	"github.com/opencurve/curveadm/internal/playbook"
-	tui "github.com/opencurve/curveadm/internal/tui/common"
+	"github.com/opencurve/curveadm/internal/task/task/common"
+	tuicomm "github.com/opencurve/curveadm/internal/tui/common"
+
 	cliutil "github.com/opencurve/curveadm/internal/utils"
 	"github.com/spf13/cobra"
 )
@@ -71,26 +73,23 @@ var (
 	// chunkserevr (curvebs)
 	MIGRATE_CHUNKSERVER_STEPS = []int{
 		playbook.BACKUP_ETCD_DATA,
-		playbook.STOP_SERVICE,
-		playbook.CLEAN_SERVICE, // only container
+		playbook.CREATE_PHYSICAL_POOL, // add machine that migrate to
 		playbook.PULL_IMAGE,
 		playbook.CREATE_CONTAINER,
 		playbook.SYNC_CONFIG,
-		playbook.CREATE_PHYSICAL_POOL,
 		playbook.START_CHUNKSERVER,
-		playbook.CREATE_LOGICAL_POOL,
+		playbook.MARK_SERVER_PENGDDING, // start migrate to new server
 	}
 
 	// metaserver (curvefs)
 	MIGRATE_METASERVER_STEPS = []int{
 		playbook.BACKUP_ETCD_DATA,
-		playbook.STOP_SERVICE, // only container
-		playbook.CLEAN_SERVICE,
+		playbook.CREATE_LOGICAL_POOL,
 		playbook.PULL_IMAGE,
 		playbook.CREATE_CONTAINER,
 		playbook.SYNC_CONFIG,
 		playbook.START_METASERVER,
-		playbook.CREATE_LOGICAL_POOL,
+		playbook.STOP_SERVICE, // start migrate to new server
 	}
 
 	MIGRATE_ROLE_STEPS = map[string][]int{
@@ -100,12 +99,21 @@ var (
 		topology.ROLE_SNAPSHOTCLONE: MIGRATE_SNAPSHOTCLONE_STEPS,
 		topology.ROLE_METASERVER:    MIGRATE_METASERVER_STEPS,
 	}
+
+	MIGRATE_POST_CLEAN_STEPS = []int{
+		playbook.STOP_SERVICE,         // bs
+		playbook.CLEAN_SERVICE,        // bs, fs
+		playbook.CREATE_PHYSICAL_POOL, // only for chunkserver, remove server that migrate from
+		playbook.CREATE_LOGICAL_POOL,  // only for metaserver, remove server that migrate from
+		playbook.UPDATE_TOPOLOGY,      // bs, fs
+	}
 )
 
 type migrateOptions struct {
 	filename        string
 	poolset         string
 	poolsetDiskType string
+	clean           bool
 }
 
 func NewMigrateCommand(curveadm *cli.CurveAdm) *cobra.Command {
@@ -125,7 +133,7 @@ func NewMigrateCommand(curveadm *cli.CurveAdm) *cobra.Command {
 	flags := cmd.Flags()
 	flags.StringVar(&options.poolset, "poolset", "default", "Specify the poolset")
 	flags.StringVar(&options.poolsetDiskType, "poolset-disktype", "ssd", "Specify the disk type of physical pool")
-
+	flags.BoolVar(&options.clean, "clean", false, "Clean migrated environment for chunkserver or metaserver")
 	return cmd
 }
 
@@ -191,8 +199,21 @@ func genMigratePlaybook(curveadm *cli.CurveAdm,
 	migrates := getMigrates(curveadm, data)
 	role := migrates[0].From.GetRole()
 	steps := MIGRATE_ROLE_STEPS[role]
-	poolset := options.poolset
-	poolsetDiskType := options.poolsetDiskType
+
+	// post clean
+	if options.clean {
+		steps = MIGRATE_POST_CLEAN_STEPS
+		if migrates[0].From.GetKind() == common.KIND_CURVEBS {
+			steps = append(steps[:3], steps[4:]...)
+		} else {
+			steps = append(steps[1:2], steps[3:]...)
+		}
+	}
+
+	poolset := configure.Poolset{
+		Name: options.poolset,
+		Type: options.poolsetDiskType,
+	}
 
 	pb := playbook.NewPlaybook(curveadm)
 	for _, step := range steps {
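Editor's note (not part of the patch): the slice expressions in the clean branch above are easy to misread. The minimal, self-contained Go sketch below uses string stand-ins for the playbook step constants to show which post-clean steps each kind ends up with; the helper name postCleanSteps and the string labels are illustrative assumptions, only the slicing mirrors the diff.

package main

import "fmt"

// postCleanSteps mirrors the order of MIGRATE_POST_CLEAN_STEPS declared above,
// using plain strings instead of the playbook constants (illustration only).
func postCleanSteps() []string {
	return []string{
		"STOP_SERVICE",         // bs
		"CLEAN_SERVICE",        // bs, fs
		"CREATE_PHYSICAL_POOL", // only for chunkserver
		"CREATE_LOGICAL_POOL",  // only for metaserver
		"UPDATE_TOPOLOGY",      // bs, fs
	}
}

func main() {
	// curvebs (chunkserver): steps[:3] + steps[4:] drops CREATE_LOGICAL_POOL.
	bs := postCleanSteps()
	fmt.Println(append(bs[:3], bs[4:]...)) // [STOP_SERVICE CLEAN_SERVICE CREATE_PHYSICAL_POOL UPDATE_TOPOLOGY]

	// curvefs (metaserver): steps[1:2] + steps[3:] keeps CLEAN_SERVICE,
	// CREATE_LOGICAL_POOL and UPDATE_TOPOLOGY.
	fs := postCleanSteps()
	fmt.Println(append(fs[1:2], fs[3:]...)) // [CLEAN_SERVICE CREATE_LOGICAL_POOL UPDATE_TOPOLOGY]
}

Each case in the sketch builds a fresh slice because append writes into the slice's backing array; the same aliasing exists in the patch, where the clean branch appends into the array backing the package-level MIGRATE_POST_CLEAN_STEPS. Only one branch runs per command invocation, so this does not affect a single run.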
@@ -204,38 +225,40 @@ func genMigratePlaybook(curveadm *cli.CurveAdm,
 			config = dcs2del
 		case playbook.BACKUP_ETCD_DATA:
 			config = curveadm.FilterDeployConfigByRole(dcs, topology.ROLE_ETCD)
-		case CREATE_PHYSICAL_POOL,
-			CREATE_LOGICAL_POOL:
+		case
+			playbook.CREATE_PHYSICAL_POOL,
+			playbook.CREATE_LOGICAL_POOL,
+			playbook.MARK_SERVER_PENGDDING:
 			config = curveadm.FilterDeployConfigByRole(dcs, topology.ROLE_MDS)[:1]
 		}
 
 		// options
-		options := map[string]interface{}{}
+		optionsKV := map[string]interface{}{}
 		switch step {
 		case playbook.CLEAN_SERVICE:
-			options[comm.KEY_CLEAN_ITEMS] = []string{comm.CLEAN_ITEM_CONTAINER}
-			options[comm.KEY_CLEAN_BY_RECYCLE] = true
+			optionsKV[comm.KEY_CLEAN_ITEMS] = []string{comm.CLEAN_ITEM_CONTAINER}
+			optionsKV[comm.KEY_CLEAN_BY_RECYCLE] = true
+			optionsKV[comm.KEY_REMOVE_MIGRATED_SERVER] = true
 		case playbook.CREATE_PHYSICAL_POOL:
-			options[comm.KEY_CREATE_POOL_TYPE] = comm.POOL_TYPE_PHYSICAL
-			options[comm.KEY_MIGRATE_SERVERS] = migrates
-			options[comm.POOLSET] = poolset
-			options[comm.POOLSET_DISK_TYPE] = poolsetDiskType
+			optionsKV[comm.KEY_CREATE_POOL_TYPE] = comm.POOL_TYPE_PHYSICAL
+			optionsKV[comm.KEY_MIGRATE_SERVERS] = migrates
+			optionsKV[comm.KEY_POOLSET] = poolset
 		case playbook.CREATE_LOGICAL_POOL:
-			options[comm.KEY_CREATE_POOL_TYPE] = comm.POOL_TYPE_LOGICAL
-			options[comm.KEY_MIGRATE_SERVERS] = migrates
-			options[comm.KEY_NEW_TOPOLOGY_DATA] = data
-			options[comm.POOLSET] = poolset
-			options[comm.POOLSET_DISK_TYPE] = poolsetDiskType
+			optionsKV[comm.KEY_CREATE_POOL_TYPE] = comm.POOL_TYPE_LOGICAL
+			optionsKV[comm.KEY_MIGRATE_SERVERS] = migrates
+			optionsKV[comm.KEY_NEW_TOPOLOGY_DATA] = data
+			optionsKV[comm.KEY_IF_UPDATE_TOPOLOG] = false
+			optionsKV[comm.KEY_POOLSET] = poolset
 		case playbook.UPDATE_TOPOLOGY:
-			options[comm.KEY_NEW_TOPOLOGY_DATA] = data
+			optionsKV[comm.KEY_NEW_TOPOLOGY_DATA] = data
 		}
 
 		pb.AddStep(&playbook.PlaybookStep{
-			Type:        step,
-			Configs:     config,
-			Options:     options,
+			Type:        step,
+			Configs:     config,
+			Options:     optionsKV,
 			ExecOptions: playbook.ExecOptions{
-				SilentSubBar: step == playbook.UPDATE_TOPOLOGY,
+				// SilentSubBar: step == playbook.UPDATE_TOPOLOGY,
 			},
 		})
 	}
@@ -261,7 +284,10 @@ func runMigrate(curveadm *cli.CurveAdm, options migrateOptions) error {
 	}
 
 	// 2) read topology from file
-	data, err := readTopology(curveadm, options.filename)
+	data, err := readTopology(curveadm,
+		options.filename,
+		options.clean,
+	)
 	if err != nil {
 		return err
 	}
@@ -272,13 +298,15 @@ func runMigrate(curveadm *cli.CurveAdm, options migrateOptions) error {
 		return err
 	}
 
-	// 4) display title
-	displayMigrateTitle(curveadm, data)
+	if !options.clean {
+		// 4) display title
+		displayMigrateTitle(curveadm, data)
 
-	// 5) confirm by user
-	if pass := tui.ConfirmYes(tui.DEFAULT_CONFIRM_PROMPT); !pass {
-		curveadm.WriteOutln(tui.PromptCancelOpetation("migrate service"))
-		return errno.ERR_CANCEL_OPERATION
+		// 5) confirm by user
+		if pass := tuicomm.ConfirmYes(tuicomm.DEFAULT_CONFIRM_PROMPT); !pass {
+			curveadm.WriteOutln(tuicomm.PromptCancelOpetation("migrate service"))
+			return errno.ERR_CANCEL_OPERATION
+		}
 	}
 
 	// 6) generate migrate playbook
@@ -294,6 +322,9 @@ func runMigrate(curveadm *cli.CurveAdm, options migrateOptions) error {
 	}
 
 	// 9) print success prompt
+	if options.clean {
+		return nil
+	}
 	curveadm.WriteOutln("")
 	curveadm.WriteOutln(color.GreenString("Services successfully migrateed ^_^."))
 	// TODO(P1): warning iff there is changed configs
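Usage note (editor's summary of this diff; the exact command syntax is assumed, not taken from the patch): migration now appears to be a two-phase operation. A first run with the new topology executes the MIGRATE_*_STEPS path, which adds the target machine to the pool, brings up the new chunkserver/metaserver, and starts data migration toward it; a later run of the same command with --clean executes the MIGRATE_POST_CLEAN_STEPS path, which stops and cleans the services being migrated away from, removes them from the physical/logical pool, and updates the stored topology, while skipping the confirmation prompt and the success banner. Presumably something like: curveadm migrate topology.yaml, then curveadm migrate topology.yaml --clean once the data migration has finished.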