Added a utility function to print model FLOPS #47

Open · wants to merge 3 commits into base: llama2-google-next-training
32 changes: 32 additions & 0 deletions src/transformers/trainer.py
@@ -1804,7 +1804,10 @@ def _inner_training_loop(
profile_epoch = int(os.environ.get('PROFILE_EPOCH', -1))
profile_duration = int(os.environ.get('PROFILE_DURATION_MS', 20000))
profile_logdir = os.environ.get('PROFILE_LOGDIR', None)

self.num_compilations = 0
for step, inputs in enumerate(epoch_iterator):
    self.last_time_stamp = time.time()
    if step == 0 and epoch == 0:
        print('input sharding', {k: (v.shape, torch_xla._XLAC._get_xla_sharding_spec(v)) for k, v in inputs.items()})
    total_batched_samples += 1
@@ -1896,6 +1899,18 @@ def _inner_training_loop(
if not isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
    self.lr_scheduler.step()


xm.mark_step()
if self.num_compilations != met.metric_data('CompileTime')[:1]:
    self.num_compilations = met.metric_data('CompileTime')[:1]
else:
    xm.rendezvous('step')
    step_time = time.time() - self.last_time_stamp
    data, fsdp, mdl = self.args.spmd_mesh.ici_mesh_shape
    num_devices = data * fsdp * mdl
    num_tokens = inputs["input_ids"].numel() / num_devices
    xm.master_print(f"Step time: {step_time}: Model TFLOPS: {self.model_flops(step_time, num_tokens)}")
Review comment on the line above:
cc @yeounoh, we can emit this to Tensorboard for perf tests (in addition to the convergence metrics)
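A minimal sketch of how that could look, assuming a torch.utils.tensorboard SummaryWriter is available on the master host; the writer attribute, tag names, and log directory below are illustrative and not part of this PR:

from torch.utils.tensorboard import SummaryWriter

if xm.is_master_ordinal():
    # Create the writer once and cache it on the Trainer (sketch only).
    if not hasattr(self, "tb_perf_writer"):
        self.tb_perf_writer = SummaryWriter(log_dir=os.environ.get("PROFILE_LOGDIR", "./runs"))
    self.tb_perf_writer.add_scalar("perf/step_time_s", step_time, self.state.global_step)
    self.tb_perf_writer.add_scalar(
        "perf/model_tflops", self.model_flops(step_time, num_tokens), self.state.global_step
    )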


model.zero_grad()
self.state.global_step += 1
self.state.epoch = epoch + (step + 1 + steps_skipped) / steps_in_epoch
@@ -1905,6 +1920,7 @@
else:
    self.control = self.callback_handler.on_substep_end(args, self.state, self.control)


if self.control.should_epoch_stop or self.control.should_training_stop:
    break

@@ -2694,8 +2710,21 @@ def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor,
else:
    self.accelerator.backward(loss)


# TODO: implement memory info for PJRT
# xm.master_print(f"Memory Info: {xm.get_memory_info(xm.xla_device())}")


return loss.detach() / self.args.gradient_accumulation_steps

def model_flops(self, step_time, num_tokens):
    num_trainable_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
    model_flops = 6 * num_trainable_params * num_tokens
    model_tflops_per_second = model_flops / step_time / 1e12
    return model_tflops_per_second
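# Reviewer note, not part of this diff: 6 * params * tokens is the standard
# decoder-only training estimate (~2 FLOPs per parameter forward, ~4 backward);
# it ignores the attention-score FLOPs. Illustrative numbers only: a 7B-parameter
# model seeing 8192 tokens per device per step (e.g. batch 4 x seq len 2048) with
# a 2.0 s step time reports 6 * 7e9 * 8192 / 2.0 / 1e12 ≈ 172 TFLOPS per device.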



def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
@@ -3169,6 +3198,9 @@ def evaluation_loop(
if is_torch_tpu_available():
    xm.mark_step()




# Update containers on host
if loss is not None:
    losses = self.accelerator.gather_for_metrics((loss.repeat(batch_size)))