
Commit

Fix multiproc metrics in no_trainer examples (#16865)
muellerzr authored Apr 20, 2022
1 parent 175da8d · commit 705d653
Showing 7 changed files with 80 additions and 15 deletions.
@@ -457,12 +457,21 @@ def collate_fn(examples):
                 break
 
         model.eval()
+        samples_seen = 0
         for step, batch in enumerate(eval_dataloader):
             outputs = model(**batch)
             predictions = outputs.logits.argmax(dim=-1)
+            predictions, references = accelerator.gather((predictions, batch["labels"]))
+            # If we are in a multiprocess environment, the last batch has duplicates
+            if accelerator.num_processes > 1:
+                if step == len(eval_dataloader) - 1:
+                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
+                    references = references[: len(eval_dataloader.dataset) - samples_seen]
+                else:
+                    samples_seen += references.shape[0]
             metric.add_batch(
-                predictions=accelerator.gather(predictions),
-                references=accelerator.gather(batch["labels"]),
+                predictions=predictions,
+                references=references,
             )
 
         eval_metric = metric.compute()
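For context (not part of the commit): in distributed evaluation the sharded dataloader pads every process to the same number of batches by wrapping around to the start of the dataset, so the final gathered batch contains duplicates. A minimal arithmetic sketch of the bookkeeping, with illustrative numbers:

import math

# 10 samples sharded across 4 processes, per-device batch size 1:
# each shard is padded to ceil(10 / 4) = 3 batches, so gather() sees
# 3 * 4 = 12 predictions in total, 2 of them wrap-around duplicates.
dataset_size = 10
num_processes = 4
batches_per_shard = math.ceil(dataset_size / num_processes)

samples_seen = 0
for step in range(batches_per_shard):
    gathered = num_processes  # rows returned by gather() at this step
    if step == batches_per_shard - 1:
        gathered = dataset_size - samples_seen  # trim the duplicates
    else:
        samples_seen += gathered
    print(f"step {step}: keep {gathered} rows")
# step 0: keep 4 rows / step 1: keep 4 rows / step 2: keep 2 rows -> 10 total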
13 changes: 11 additions & 2 deletions examples/pytorch/multiple-choice/run_swag_no_trainer.py
@@ -559,13 +559,22 @@ def preprocess_function(examples):
                 break
 
         model.eval()
+        samples_seen = 0
         for step, batch in enumerate(eval_dataloader):
             with torch.no_grad():
                 outputs = model(**batch)
             predictions = outputs.logits.argmax(dim=-1)
+            predictions, references = accelerator.gather((predictions, batch["labels"]))
+            # If we are in a multiprocess environment, the last batch has duplicates
+            if accelerator.num_processes > 1:
+                if step == len(eval_dataloader) - 1:
+                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
+                    references = references[: len(eval_dataloader.dataset) - samples_seen]
+                else:
+                    samples_seen += references.shape[0]
             metric.add_batch(
-                predictions=accelerator.gather(predictions),
-                references=accelerator.gather(batch["labels"]),
+                predictions=predictions,
+                references=references,
             )
 
         eval_metric = metric.compute()
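The new code relies on accelerator.gather accepting a tuple of tensors and gathering each element across processes. A runnable single-process sketch of that pattern (on one process, gather is effectively a no-op):

import torch
from accelerate import Accelerator

accelerator = Accelerator()
predictions = torch.tensor([1, 0, 2])
labels = torch.tensor([1, 1, 2])
# Tuples (and other nested containers of tensors) are gathered element-wise;
# with N processes each result would have N times as many rows.
predictions, references = accelerator.gather((predictions, labels))
print(predictions.tolist(), references.tolist())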
@@ -567,6 +567,7 @@ def preprocess_val(example_batch):
 
         logger.info("***** Running evaluation *****")
         model.eval()
+        samples_seen = 0
         for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)):
             outputs = model(**batch)
 
@@ -575,9 +576,19 @@ def preprocess_val(example_batch):
             )
             predictions = upsampled_logits.argmax(dim=1)
 
+            predictions, references = accelerator.gather((predictions, batch["labels"]))
+
+            # If we are in a multiprocess environment, the last batch has duplicates
+            if accelerator.num_processes > 1:
+                if step == len(eval_dataloader) - 1:
+                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
+                    references = references[: len(eval_dataloader.dataset) - samples_seen]
+                else:
+                    samples_seen += references.shape[0]
+
             metric.add_batch(
-                predictions=accelerator.gather(predictions),
-                references=accelerator.gather(batch["labels"]),
+                predictions=predictions,
+                references=references,
             )
 
         eval_metrics = metric.compute(
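A hedged sketch of the upsample-then-argmax step shown above, with assumed shapes (batch of 2, 5 classes, logits at quarter resolution):

import torch

logits = torch.randn(2, 5, 16, 16)  # coarse per-class scores
upsampled_logits = torch.nn.functional.interpolate(
    logits, size=(64, 64), mode="bilinear", align_corners=False
)
predictions = upsampled_logits.argmax(dim=1)  # class id per pixel
print(predictions.shape)  # torch.Size([2, 64, 64])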
20 changes: 16 additions & 4 deletions examples/pytorch/summarization/run_summarization_no_trainer.py
@@ -628,6 +628,7 @@ def postprocess_text(preds, labels):
         "max_length": args.val_max_target_length if args is not None else config.max_length,
         "num_beams": args.num_beams,
     }
+    samples_seen = 0
     for step, batch in enumerate(eval_dataloader):
         with torch.no_grad():
             generated_tokens = accelerator.unwrap_model(model).generate(
@@ -644,8 +645,9 @@ def postprocess_text(preds, labels):
             # If we did not pad to max length, we need to pad the labels too
             labels = accelerator.pad_across_processes(batch["labels"], dim=1, pad_index=tokenizer.pad_token_id)
 
-            generated_tokens = accelerator.gather(generated_tokens).cpu().numpy()
-            labels = accelerator.gather(labels).cpu().numpy()
+            generated_tokens, labels = accelerator.gather((generated_tokens, labels))
+            generated_tokens = generated_tokens.cpu().numpy()
+            labels = labels.cpu().numpy()
 
             if args.ignore_pad_token_for_loss:
                 # Replace -100 in the labels as we can't decode them.
@@ -656,8 +658,18 @@ def postprocess_text(preds, labels):
             decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
 
             decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
-
-            metric.add_batch(predictions=decoded_preds, references=decoded_labels)
+            # If we are in a multiprocess environment, the last batch has duplicates
+            if accelerator.num_processes > 1:
+                if step == len(eval_dataloader) - 1:
+                    decoded_preds = decoded_preds[: len(eval_dataloader.dataset) - samples_seen]
+                    decoded_labels = decoded_labels[: len(eval_dataloader.dataset) - samples_seen]
+                else:
+                    samples_seen += len(decoded_labels)
+
+            metric.add_batch(
+                predictions=decoded_preds,
+                references=decoded_labels,
+            )
     result = metric.compute(use_stemmer=True)
     # Extract a few results from ROUGE
     result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
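gather concatenates along the first dimension and requires identical shapes on every process, which is why variable-length generations and labels go through pad_across_processes first. A single-process sketch (pad_index=-100 stands in for tokenizer.pad_token_id here):

import numpy as np
import torch
from accelerate import Accelerator

accelerator = Accelerator()
labels = torch.tensor([[42, 7, -100]])
# On N processes this pads dim=1 to the longest sequence before gathering.
labels = accelerator.pad_across_processes(labels, dim=1, pad_index=-100)
labels = accelerator.gather(labels).cpu().numpy()
# Replace -100 before decoding, as the example does for ignore_pad_token_for_loss:
labels = np.where(labels != -100, labels, 0)
print(labels)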
13 changes: 11 additions & 2 deletions examples/pytorch/text-classification/run_glue_no_trainer.py
@@ -506,12 +506,21 @@ def preprocess_function(examples):
                 break
 
         model.eval()
+        samples_seen = 0
         for step, batch in enumerate(eval_dataloader):
             outputs = model(**batch)
             predictions = outputs.logits.argmax(dim=-1) if not is_regression else outputs.logits.squeeze()
+            predictions, references = accelerator.gather((predictions, batch["labels"]))
+            # If we are in a multiprocess environment, the last batch has duplicates
+            if accelerator.num_processes > 1:
+                if step == len(eval_dataloader) - 1:
+                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
+                    references = references[: len(eval_dataloader.dataset) - samples_seen]
+                else:
+                    samples_seen += references.shape[0]
             metric.add_batch(
-                predictions=accelerator.gather(predictions),
-                references=accelerator.gather(batch["labels"]),
+                predictions=predictions,
+                references=references,
            )
 
         eval_metric = metric.compute()
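The same bookkeeping is pasted into every script; a hypothetical helper (not in the commit, illustration only) makes the invariant explicit:

def trim_duplicates(step, num_batches, dataset_len, samples_seen, *sequences):
    """Drop wrap-around duplicates from the final gathered batch.

    Returns the (possibly truncated) sequences plus the updated samples_seen.
    Works for tensors and lists alike, since both support len() and slicing.
    """
    if step == num_batches - 1:
        sequences = tuple(s[: dataset_len - samples_seen] for s in sequences)
    else:
        samples_seen += len(sequences[0])
    return (*sequences, samples_seen)

# Hypothetical usage inside the eval loop:
# predictions, references, samples_seen = trim_duplicates(
#     step, len(eval_dataloader), len(eval_dataloader.dataset),
#     samples_seen, predictions, references)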
12 changes: 9 additions & 3 deletions examples/pytorch/token-classification/run_ner_no_trainer.py
@@ -658,6 +658,7 @@ def compute_metrics():
                 break
 
         model.eval()
+        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
             with torch.no_grad():
                 outputs = model(**batch)
 
@@ -666,9 +667,14 @@ def compute_metrics():
             if not args.pad_to_max_length:  # necessary to pad predictions and labels for being gathered
                 predictions = accelerator.pad_across_processes(predictions, dim=1, pad_index=-100)
                 labels = accelerator.pad_across_processes(labels, dim=1, pad_index=-100)
-
-            predictions_gathered = accelerator.gather(predictions)
-            labels_gathered = accelerator.gather(labels)
+            predictions_gathered, labels_gathered = accelerator.gather((predictions, labels))
+            # If we are in a multiprocess environment, the last batch has duplicates
+            if accelerator.num_processes > 1:
+                if step == len(eval_dataloader) - 1:
+                    predictions_gathered = predictions_gathered[: len(eval_dataloader.dataset) - samples_seen]
+                    labels_gathered = labels_gathered[: len(eval_dataloader.dataset) - samples_seen]
+                else:
+                    samples_seen += labels_gathered.shape[0]
             preds, refs = get_labels(predictions_gathered, labels_gathered)
             metric.add_batch(
                 predictions=preds,
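get_labels (defined elsewhere in run_ner_no_trainer.py) converts the gathered id tensors to string tags while dropping the -100 positions that mark ignored sub-tokens. A hedged sketch with an assumed label vocabulary:

import torch

label_list = ["O", "B-PER", "I-PER"]  # assumed vocabulary, not from the script

def get_labels_sketch(predictions_gathered, labels_gathered):
    y_pred = predictions_gathered.detach().cpu().tolist()
    y_true = labels_gathered.detach().cpu().tolist()
    preds = [
        [label_list[p] for p, r in zip(pred, ref) if r != -100]
        for pred, ref in zip(y_pred, y_true)
    ]
    refs = [[label_list[r] for r in ref if r != -100] for ref in y_true]
    return preds, refs

preds, refs = get_labels_sketch(torch.tensor([[1, 2, 0]]), torch.tensor([[1, 2, -100]]))
print(preds, refs)  # [['B-PER', 'I-PER']] [['B-PER', 'I-PER']]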
9 changes: 9 additions & 0 deletions examples/pytorch/translation/run_translation_no_trainer.py
@@ -613,6 +613,7 @@ def postprocess_text(preds, labels):
         "max_length": args.val_max_target_length if args is not None else config.max_length,
         "num_beams": args.num_beams,
     }
+    samples_seen = 0
     for step, batch in enumerate(eval_dataloader):
         with torch.no_grad():
             generated_tokens = accelerator.unwrap_model(model).generate(
@@ -641,6 +642,14 @@ def postprocess_text(preds, labels):
 
             decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
 
+            # If we are in a multiprocess environment, the last batch has duplicates
+            if accelerator.num_processes > 1:
+                if step == len(eval_dataloader) - 1:
+                    decoded_preds = decoded_preds[: len(eval_dataloader.dataset) - samples_seen]
+                    decoded_labels = decoded_labels[: len(eval_dataloader.dataset) - samples_seen]
+                else:
+                    samples_seen += len(decoded_labels)
+
             metric.add_batch(predictions=decoded_preds, references=decoded_labels)
     eval_metric = metric.compute()
     logger.info({"bleu": eval_metric["score"]})
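The translation example scores with sacrebleu, which expects a list of references per prediction. A self-contained sketch of the final scoring step, assuming the 2022-era datasets.load_metric API the scripts used (requires the sacrebleu package):

from datasets import load_metric

metric = load_metric("sacrebleu")
metric.add_batch(
    predictions=["the cat sat on the mat"],
    references=[["the cat sat on the mat"]],  # one list of references per prediction
)
eval_metric = metric.compute()
print({"bleu": eval_metric["score"]})  # 100.0 for an exact match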
