diff --git a/ppsci/metric/func.py b/ppsci/metric/func.py
index 7cc74dd3f..56273784d 100644
--- a/ppsci/metric/func.py
+++ b/ppsci/metric/func.py
@@ -27,17 +27,23 @@ class FunctionalMetric(base.Metric):
         keep_batch (bool, optional): Whether keep batch axis. Defaults to False.
 
     Examples:
-        >>> import ppsci
         >>> import paddle
+        >>> from ppsci.metric import FunctionalMetric
         >>> def metric_expr(output_dict, *args):
         ...     rel_l2 = 0
         ...     for key in output_dict:
         ...         length = int(len(output_dict[key])/2)
-        ...         out_dict = {key: output_dict[key][:length]}
-        ...         label_dict = {key: output_dict[key][length:]}
+        ...         out_dict = output_dict[key][:length]
+        ...         label_dict = output_dict[key][length:]
         ...         rel_l2 += paddle.norm(out_dict - label_dict) / paddle.norm(label_dict)
-        ...     return {"l2": rel_l2}
-        >>> metric_dict = ppsci.metric.FunctionalMetric(metric_expr)
+        ...     return {"rel_l2": rel_l2}
+        >>> metric_dict = FunctionalMetric(metric_expr)
+        >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3], [-0.2, 1.5], [-0.1, -0.3]]),
+        ...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3], [-1.8, 1.0], [-0.2, 2.5]])}
+        >>> result = metric_dict(output_dict)
+        >>> print(result)
+        {'rel_l2': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               2.59985542)}
     """
 
     def __init__(
diff --git a/ppsci/metric/l2_rel.py b/ppsci/metric/l2_rel.py
index 4de9c186d..6bb162006 100644
--- a/ppsci/metric/l2_rel.py
+++ b/ppsci/metric/l2_rel.py
@@ -40,8 +40,18 @@ class L2Rel(base.Metric):
         keep_batch (bool, optional): Whether keep batch axis. Defaults to False.
 
     Examples:
-        >>> import ppsci
-        >>> metric = ppsci.metric.L2Rel()
+        >>> import paddle
+        >>> from ppsci.metric import L2Rel
+        >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
+        ...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
+        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
+        ...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
+        >>> loss = L2Rel()
+        >>> result = loss(output_dict, label_dict)
+        >>> print(result)
+        {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               1.42658269), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               9.69535923)}
     """
 
     # NOTE: Avoid divide by zero in result
@@ -85,8 +95,24 @@ class MeanL2Rel(base.Metric):
         keep_batch (bool, optional): Whether keep batch axis. Defaults to False.
 
     Examples:
-        >>> import ppsci
-        >>> metric = ppsci.metric.MeanL2Rel()
+        >>> import paddle
+        >>> from ppsci.metric import MeanL2Rel
+        >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
+        ...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
+        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
+        ...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
+        >>> loss = MeanL2Rel()
+        >>> result = loss(output_dict, label_dict)
+        >>> print(result)
+        {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               1.35970235), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               9.24504089)}
+        >>> loss = MeanL2Rel(keep_batch=True)
+        >>> result = loss(output_dict, label_dict)
+        >>> print(result)
+        {'u': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               [1.11803389, 1.60137081]), 'v': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               [6.32455540 , 12.16552544])}
     """
 
     # NOTE: Avoid divide by zero in result
diff --git a/ppsci/metric/mae.py b/ppsci/metric/mae.py
index 10a491f7f..0554a3cc5 100644
--- a/ppsci/metric/mae.py
+++ b/ppsci/metric/mae.py
@@ -35,8 +35,24 @@ class MAE(base.Metric):
         keep_batch (bool, optional): Whether keep batch axis. Defaults to False.
 
     Examples:
-        >>> import ppsci
-        >>> metric = ppsci.metric.MAE()
+        >>> import paddle
+        >>> from ppsci.metric import MAE
+        >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
+        ...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
+        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
+        ...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
+        >>> loss = MAE()
+        >>> result = loss(output_dict, label_dict)
+        >>> print(result)
+        {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               1.87500000), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               0.89999998)}
+        >>> loss = MAE(keep_batch=True)
+        >>> result = loss(output_dict, label_dict)
+        >>> print(result)
+        {'u': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               [1.20000005, 2.54999995]), 'v': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               [0.59999996, 1.20000005])}
     """
 
     def __init__(self, keep_batch: bool = False):
diff --git a/ppsci/metric/mse.py b/ppsci/metric/mse.py
index ee21f6b0c..9b42f3646 100644
--- a/ppsci/metric/mse.py
+++ b/ppsci/metric/mse.py
@@ -35,8 +35,24 @@ class MSE(base.Metric):
         keep_batch (bool, optional): Whether keep batch axis. Defaults to False.
 
     Examples:
-        >>> import ppsci
-        >>> metric = ppsci.metric.MSE()
+        >>> import paddle
+        >>> from ppsci.metric import MSE
+        >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
+        ...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
+        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
+        ...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
+        >>> loss = MSE()
+        >>> result = loss(output_dict, label_dict)
+        >>> print(result)
+        {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               5.35750008), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               0.94000000)}
+        >>> loss = MSE(keep_batch=True)
+        >>> result = loss(output_dict, label_dict)
+        >>> print(result)
+        {'u': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               [2.65000010, 8.06499958]), 'v': Tensor(shape=[2], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               [0.39999998, 1.48000002])}
     """
 
     def __init__(self, keep_batch: bool = False):
diff --git a/ppsci/metric/rmse.py b/ppsci/metric/rmse.py
index 238aed186..625538088 100644
--- a/ppsci/metric/rmse.py
+++ b/ppsci/metric/rmse.py
@@ -41,8 +41,18 @@ class RMSE(base.Metric):
         keep_batch (bool, optional): Whether keep batch axis. Defaults to False.
 
     Examples:
-        >>> import ppsci
-        >>> metric = ppsci.metric.RMSE()
+        >>> import paddle
+        >>> from ppsci.metric import RMSE
+        >>> output_dict = {'u': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]]),
+        ...                'v': paddle.to_tensor([[0.5, 0.9], [1.1, -1.3]])}
+        >>> label_dict = {'u': paddle.to_tensor([[-1.8, 1.0], [-0.2, 2.5]]),
+        ...               'v': paddle.to_tensor([[0.1, 0.1], [0.1, 0.1]])}
+        >>> loss = RMSE()
+        >>> result = loss(output_dict, label_dict)
+        >>> print(result)
+        {'u': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               2.31462741), 'v': Tensor(shape=[], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+               0.96953595)}
     """
 
     def __init__(self, keep_batch: bool = False):
diff --git a/ppsci/solver/solver.py b/ppsci/solver/solver.py
index 4b8d8643f..3f3fbcd29 100644
--- a/ppsci/solver/solver.py
+++ b/ppsci/solver/solver.py
@@ -520,6 +520,21 @@ def predict(
 
         Returns:
             Dict[str, Union[paddle.Tensor, np.ndarray]]: Prediction in dict.
+
+        Examples:
+            >>> import paddle
+            >>> import ppsci
+            >>> paddle.seed(42) # doctest: +SKIP
+            >>> model = ppsci.arch.MLP(('x', 'y'), ('u', 'v'), num_layers=None, hidden_size=[32, 8])
+            >>> solver = ppsci.solver.Solver(model) # doctest: +SKIP
+            >>> input_dict = {'x': paddle.rand((2, 1)),
+            ...               'y': paddle.rand((2, 1))}
+            >>> solver.predict(input_dict) # doctest: +SKIP
+            {'u': Tensor(shape=[2, 1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            [[-0.17509711],
+             [-0.03884222]]), 'v': Tensor(shape=[2, 1], dtype=float32, place=Place(gpu:0), stop_gradient=True,
+            [[0.27433380],
+             [0.42387512]])}
         """
         num_samples = len(next(iter(input_dict.values())))
         num_pad = (self.world_size - num_samples % self.world_size) % self.world_size