fix(moe): fix moe zero mode bug (InternLM#548)
* fix moe zero mode bugs

* update moe config to fit training on 8 GPUs
blankde authored Dec 18, 2023
1 parent bbb5651 commit 513ebb9
Showing 2 changed files with 2 additions and 2 deletions.
configs/7B_MoE4_sft.py (2 changes: 1 addition & 1 deletion)
@@ -141,7 +141,7 @@
     layer_norm_epsilon=1e-5,
     use_flash_attn=True,
     num_chunks=1,  # if num_chunks > 1, interleaved pipeline scheduler is used.
-    num_experts=8,
+    num_experts=4,
     moe_use_residual=False,
     moe_gate_k=2,
 )
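
For context, here is a minimal sketch of how the MoE-related fields from this hunk sit inside the model configuration. Only the field names and values shown in the hunk come from the commit; the model = dict(...) wrapper and the inline notes follow the usual InternLM config layout and the commit message, and are otherwise assumptions.

# Sketch only, not a verbatim excerpt of configs/7B_MoE4_sft.py.
model = dict(
    layer_norm_epsilon=1e-5,
    use_flash_attn=True,
    num_chunks=1,  # if num_chunks > 1, interleaved pipeline scheduler is used.
    num_experts=4,  # lowered from 8 so the expert setup fits an 8-GPU training run
    moe_use_residual=False,
    moe_gate_k=2,  # each token is routed to its top-2 experts
)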
internlm/solver/optimizer/hybrid_zero_optim.py (2 changes: 1 addition & 1 deletion)
@@ -150,7 +150,7 @@ def __init__(
 # if zero is used, expert dp group will use ParallelMode.EXPERT_DATA as the real zero mode
 zero_mode = (
     ParallelMode.ZERO1
-    if param_group["dp_mode"] == gpc.get_world_size(ParallelMode.ZERO1) == 1 or ParallelMode.DATA
+    if gpc.get_world_size(ParallelMode.ZERO1) == 1 or param_group["dp_mode"] == ParallelMode.DATA
     else ParallelMode.EXPERT_DATA
 )
 self._zero_local_rank.append(gpc.get_local_rank(zero_mode))
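
To make the corrected condition easier to follow, below is a hedged sketch of the same selection logic pulled out into a standalone helper. The helper name select_zero_mode and the import paths are illustrative assumptions, not part of the commit; the gpc and ParallelMode calls mirror the ones in the hunk above.

# Illustrative helper, not code from the commit.
from internlm.core.context import ParallelMode
from internlm.core.context import global_context as gpc


def select_zero_mode(param_group: dict) -> ParallelMode:
    """Pick the process group used as the ZeRO partitioning mode for one
    optimizer parameter group."""
    # If ZeRO-1 is effectively disabled (world size 1) or the group is plain
    # data-parallel, partition under ParallelMode.ZERO1; otherwise the group
    # holds expert parameters and uses ParallelMode.EXPERT_DATA.
    if gpc.get_world_size(ParallelMode.ZERO1) == 1 or param_group["dp_mode"] == ParallelMode.DATA:
        return ParallelMode.ZERO1
    return ParallelMode.EXPERT_DATA

The optimizer then records its local rank in whichever group was selected, as the context line self._zero_local_rank.append(gpc.get_local_rank(zero_mode)) shows.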
