From 47346cb1c1a60b6b77ff139a79bc0c3038ef9d2e Mon Sep 17 00:00:00 2001 From: "Gang Hyeok Lee (Robin)" <110103843+ghlrobin@users.noreply.github.com> Date: Thu, 13 Mar 2025 14:07:13 +0900 Subject: [PATCH] Update fine-tune-vit.md load_metric has been removed in datasets@3.0.0; replaced it with load from evaluate --- fine-tune-vit.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fine-tune-vit.md b/fine-tune-vit.md index cee1142118..0bc5e6e006 100644 --- a/fine-tune-vit.md +++ b/fine-tune-vit.md @@ -325,14 +325,14 @@ def collate_fn(batch): ### Define an evaluation metric -The [accuracy](https://huggingface.co/metrics/accuracy) metric from `datasets` can easily be used to compare the predictions with the labels. Below, you can see how to use it within a `compute_metrics` function that will be used by the `Trainer`. +The [accuracy](https://huggingface.co/spaces/evaluate-metric/accuracy) metric from `evaluate` can easily be used to compare the predictions with the labels. Below, you can see how to use it within a `compute_metrics` function that will be used by the `Trainer`. ```python import numpy as np -from datasets import load_metric +from evaluate import load -metric = load_metric("accuracy") +metric = load("accuracy") def compute_metrics(p): return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids) ```