[remove fluid.layers.cross_entropy] remove unit tests (part 3) #48918

Merged
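Every hunk in this PR applies the same mechanical substitution: the removed fluid.layers.cross_entropy(input, label) call becomes paddle.nn.functional.cross_entropy(input, label, reduction='none', use_softmax=False), followed by an explicit paddle.mean. A minimal sketch of the equivalence, with illustrative tensor names (not taken from the tests below) and assuming Paddle 2.x:

    import paddle
    import paddle.nn.functional as F

    # `probs` stands in for the output of an fc(..., act='softmax') layer,
    # so it already holds probabilities; `label` holds integer class ids.
    probs = F.softmax(paddle.rand([8, 10]))
    label = paddle.randint(0, 10, [8, 1], dtype='int64')

    # Old (removed) API:
    #   cost = fluid.layers.cross_entropy(input=probs, label=label)
    # New API: reduction='none' keeps the per-sample losses the old op
    # returned, and use_softmax=False skips the internal softmax because
    # the input is already a probability distribution.
    cost = F.cross_entropy(probs, label, reduction='none', use_softmax=False)
    avg_cost = paddle.mean(cost)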
4 changes: 3 additions & 1 deletion python/paddle/fluid/tests/unittests/test_dist_transpiler.py
@@ -747,7 +747,9 @@ def emb_pool(ids, table_name, is_distributed):
)

label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-cost = fluid.layers.cross_entropy(input=predict, label=label)
+cost = paddle.nn.functional.cross_entropy(
+    input=predict, label=label, reduction='none', use_softmax=False
+)
avg_cost = paddle.mean(cost)
optimizer = fluid.optimizer.Adam(learning_rate=0.003)
optimizer.minimize(avg_cost)
14 changes: 12 additions & 2 deletions python/paddle/fluid/tests/unittests/test_optimizer.py
@@ -1168,7 +1168,12 @@ def mlp(input_x, input_y):
prediction = fluid.layers.fc(
input=[drop_res], size=2, act='softmax'
)
-cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
+cost = paddle.nn.functional.cross_entropy(
+    input=prediction,
+    label=input_y,
+    reduction='none',
+    use_softmax=False,
+)
sum_cost = paddle.mean(cost)
return drop_res, prediction, sum_cost

@@ -1225,7 +1230,12 @@ def mlp(input_x, input_y):
prediction = fluid.layers.fc(
input=[drop_res], size=2, act='softmax'
)
-cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
+cost = paddle.nn.functional.cross_entropy(
+    input=prediction,
+    label=input_y,
+    reduction='none',
+    use_softmax=False,
+)
sum_cost = paddle.mean(cost)
return drop_res, prediction, sum_cost

@@ -75,7 +75,9 @@ def double_fc_net(image):

def fn_1(opt, avg_loss=None, pred=None, label=None):
if avg_loss is None:
-loss = layers.cross_entropy(input=pred, label=label)
+loss = paddle.nn.functional.cross_entropy(
+    input=pred, label=label, reduction='none', use_softmax=False
+)
avg_loss = paddle.mean(loss, name='mean_cross_entropy_loss')
opt.minimize(avg_loss)
return avg_loss
@@ -106,7 +108,12 @@ def fn_2(opt, avg_loss=None, pred=None, label=None):
lambda: fn_2(sgd, None, prediction, label),
)
else:
-loss_1 = layers.cross_entropy(input=prediction, label=label)
+loss_1 = paddle.nn.functional.cross_entropy(
+    input=prediction,
+    label=label,
+    reduction='none',
+    use_softmax=False,
+)
avg_loss_1 = paddle.mean(loss_1)
loss_2 = paddle.nn.functional.softmax_with_cross_entropy(
logits=prediction, label=label
@@ -188,7 +195,9 @@ def dynamic(train_data, use_cuda=False, use_parallel_exe=False):
hidden, prediction = dy_layer(var_input)

if epoch % 2 == 0:
-cross_entropy_loss = layers.cross_entropy(prediction, var_label)
+cross_entropy_loss = paddle.nn.functional.cross_entropy(
+    prediction, var_label, reduction='none', use_softmax=False
+)
loss = paddle.mean(cross_entropy_loss)
loss.backward()
adam.minimize(loss)
@@ -82,7 +82,9 @@ def network_func():
for _ in range(10):
hidden = fluid.layers.fc(input=img, size=200, act='tanh')
prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
-loss = fluid.layers.cross_entropy(input=prediction, label=label)
+loss = paddle.nn.functional.cross_entropy(
+    input=prediction, label=label, reduction='none', use_softmax=False
+)
avg_loss = paddle.mean(loss)
fluid.optimizer.Adam().minimize(avg_loss)
return avg_loss
@@ -60,7 +60,9 @@ def parallel_exe(
)
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
out = Lenet(data, class_dim=102)
-loss = fluid.layers.cross_entropy(input=out, label=label)
+loss = paddle.nn.functional.cross_entropy(
+    input=out, label=label, reduction='none', use_softmax=False
+)
loss = paddle.mean(loss)
opt = fluid.optimizer.Momentum(
learning_rate=0.1,
@@ -37,7 +37,9 @@ def simple_fc_net(use_feed):
),
)
prediction = fluid.layers.fc(hidden, size=10, act='softmax')
-loss = fluid.layers.cross_entropy(input=prediction, label=label)
+loss = paddle.nn.functional.cross_entropy(
+    input=prediction, label=label, reduction='none', use_softmax=False
+)
loss = paddle.mean(loss)
return loss

@@ -62,7 +64,9 @@ def fc_with_batchnorm(use_feed):
with fluid.name_scope("fc_layer"):
prediction = fluid.layers.fc(hidden, size=10, act='softmax')
with fluid.name_scope("loss"):
-loss = fluid.layers.cross_entropy(input=prediction, label=label)
+loss = paddle.nn.functional.cross_entropy(
+    input=prediction, label=label, reduction='none', use_softmax=False
+)
loss = paddle.mean(loss)
return loss

4 changes: 3 additions & 1 deletion python/paddle/fluid/tests/unittests/test_profiler.py
@@ -57,7 +57,9 @@ def build_program(self, compile_program=True):
hidden2 = fluid.layers.fc(input=hidden_n, size=64, act='relu')
predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
label = fluid.layers.data(name='y', shape=[1], dtype='int64')
-cost = fluid.layers.cross_entropy(input=predict, label=label)
+cost = paddle.nn.functional.cross_entropy(
+    input=predict, label=label, reduction='none', use_softmax=False
+)
avg_cost = paddle.mean(cost)
batch_size = paddle.tensor.create_tensor(dtype='int64')
batch_acc = paddle.static.accuracy(
16 changes: 12 additions & 4 deletions python/paddle/fluid/tests/unittests/test_program_prune_backward.py
@@ -54,7 +54,9 @@ def lstm_net(use_feed):
lstm_max_tanh = paddle.tanh(lstm_max)
fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh')
prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')
-cost = fluid.layers.cross_entropy(input=prediction, label=label)
+cost = paddle.nn.functional.cross_entropy(
+    input=prediction, label=label, reduction='none', use_softmax=False
+)
avg_cost = paddle.mean(x=cost)
return avg_cost

@@ -74,7 +76,9 @@ def simple_fc_net_with_accuracy(use_feed):
),
)
prediction = fluid.layers.fc(hidden, size=10, act='softmax')
-loss = fluid.layers.cross_entropy(input=prediction, label=label)
+loss = paddle.nn.functional.cross_entropy(
+    input=prediction, label=label, reduction='none', use_softmax=False
+)
loss = paddle.mean(loss)
accuracy_out = paddle.static.accuracy(input=prediction, label=label, k=5)
return loss
@@ -87,7 +91,9 @@ def cond_net(use_feed=None):

def loss1(pred, label):
x = fluid.layers.data(name="x", shape=[4], dtype='float32')
-loss = fluid.layers.cross_entropy(input=pred, label=label)
+loss = paddle.nn.functional.cross_entropy(
+    input=pred, label=label, reduction='none', use_softmax=False
+)
avg_loss = paddle.mean(loss, name='mean_cross_entropy_loss')
return avg_loss

@@ -114,7 +120,9 @@ def optimization_in_cond_net(with_optimize=False):

def loss1(opt, pred, label, with_optimize):
x = fluid.layers.data(name="x", shape=[4], dtype='float32')
-loss = fluid.layers.cross_entropy(input=pred, label=label)
+loss = paddle.nn.functional.cross_entropy(
+    input=pred, label=label, reduction='none', use_softmax=False
+)
avg_loss = paddle.mean(loss, name='mean_cross_entropy_loss')
if with_optimize:
opt.minimize(avg_loss)
34 changes: 22 additions & 12 deletions python/paddle/fluid/tests/unittests/test_prune.py
@@ -28,7 +28,9 @@ def net(self):
x = fluid.layers.data(name='x', shape=[2], dtype='float32')
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
y = fluid.layers.fc(input=[x], size=2, act="softmax")
-loss = fluid.layers.cross_entropy(input=y, label=label)
+loss = paddle.nn.functional.cross_entropy(
+    input=y, label=label, reduction='none', use_softmax=False
+)
loss = paddle.mean(x=loss)
return x, y, label, loss

@@ -45,7 +47,7 @@ def test_prune_with_input(self):
"mul",
"elementwise_add",
"softmax",
"cross_entropy2",
"softmax_with_cross_entropy",
"reduce_mean",
],
)
@@ -55,7 +57,7 @@
self.assertEqual(len(pruned_program.global_block().ops), 2)
self.assertEqual(
[op.type for op in pruned_program.global_block().ops],
["cross_entropy2", "reduce_mean"],
["softmax_with_cross_entropy", "reduce_mean"],
)

def test_prune(self):
@@ -71,7 +73,7 @@ def test_prune(self):
"mul",
"elementwise_add",
"softmax",
"cross_entropy2",
"softmax_with_cross_entropy",
"reduce_mean",
],
)
@@ -83,7 +85,7 @@
"mul",
"elementwise_add",
"softmax",
"cross_entropy2",
"softmax_with_cross_entropy",
"reduce_mean",
],
)
@@ -101,7 +103,7 @@ def test_prune_target_not_list(self):
"mul",
"elementwise_add",
"softmax",
"cross_entropy2",
"softmax_with_cross_entropy",
"reduce_mean",
],
)
@@ -113,7 +115,7 @@
"mul",
"elementwise_add",
"softmax",
"cross_entropy2",
"softmax_with_cross_entropy",
"reduce_mean",
],
)
@@ -131,7 +133,7 @@ def test_prune_target_none(self):
"mul",
"elementwise_add",
"softmax",
"cross_entropy2",
"softmax_with_cross_entropy",
"reduce_mean",
],
)
@@ -170,9 +172,13 @@ def net1(self):
y = fluid.layers.fc(
input=[x], size=2, act="softmax", param_attr=w_param_attrs
)
-loss1 = fluid.layers.cross_entropy(input=y, label=label)
+loss1 = paddle.nn.functional.cross_entropy(
+    input=y, label=label, reduction='none', use_softmax=False
+)
loss1 = paddle.mean(x=loss1)
-loss2 = fluid.layers.cross_entropy(input=y, label=label)
+loss2 = paddle.nn.functional.cross_entropy(
+    input=y, label=label, reduction='none', use_softmax=False
+)
loss2 = paddle.mean(x=loss2)
loss1.persistable = True
loss2.persistable = True
@@ -200,9 +206,13 @@ def net2(self):
y2 = fluid.layers.fc(
input=[x2], size=2, act="softmax", param_attr=w2_param_attrs
)
-loss1 = fluid.layers.cross_entropy(input=y1, label=label)
+loss1 = paddle.nn.functional.cross_entropy(
+    input=y1, label=label, reduction='none', use_softmax=False
+)
loss1 = paddle.mean(x=loss1)
-loss2 = fluid.layers.cross_entropy(input=y2, label=label)
+loss2 = paddle.nn.functional.cross_entropy(
+    input=y2, label=label, reduction='none', use_softmax=False
+)
loss2 = paddle.mean(x=loss2)
return (
x1,
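The expectation changes in test_prune.py reflect an operator-level difference: in a static-graph program, paddle.nn.functional.cross_entropy lowers to the softmax_with_cross_entropy operator, whereas the removed fluid.layers.cross_entropy produced a cross_entropy2 operator, so every asserted op list is updated. A minimal static-graph sketch of how such op lists are inspected, mirroring the net helper above (exact op lists can vary across Paddle versions):

    import paddle

    paddle.enable_static()
    main = paddle.static.Program()
    with paddle.static.program_guard(main):
        x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32')
        label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
        y = paddle.static.nn.fc(x, size=2, activation='softmax')
        loss = paddle.nn.functional.cross_entropy(
            y, label, reduction='none', use_softmax=False
        )
        loss = paddle.mean(loss)

    # With the new API this prints ['mul', 'elementwise_add', 'softmax',
    # 'softmax_with_cross_entropy', 'reduce_mean'], matching the lists
    # asserted above; the old API produced 'cross_entropy2' instead.
    print([op.type for op in main.global_block().ops])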
4 changes: 3 additions & 1 deletion python/paddle/fluid/tests/unittests/test_py_func_op.py
@@ -104,7 +104,9 @@ def simple_fc_net(img, label, use_py_func_op):

prediction = fluid.layers.fc(hidden, size=10, act='softmax')
if not use_py_func_op:
-loss = fluid.layers.cross_entropy(input=prediction, label=label)
+loss = paddle.nn.functional.cross_entropy(
+    input=prediction, label=label, reduction='none', use_softmax=False
+)
else:
loss = (
fluid.default_main_program()
4 changes: 3 additions & 1 deletion python/paddle/fluid/tests/unittests/test_regularizer.py
@@ -141,7 +141,9 @@ def bow_net(
fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
-cost = fluid.layers.cross_entropy(input=prediction, label=label)
+cost = paddle.nn.functional.cross_entropy(
+    input=prediction, label=label, reduction='none', use_softmax=False
+)
avg_cost = paddle.mean(x=cost)
return avg_cost

4 changes: 3 additions & 1 deletion python/paddle/fluid/tests/unittests/test_regularizer_api.py
@@ -47,7 +47,9 @@ def bow_net(
fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
-cost = fluid.layers.cross_entropy(input=prediction, label=label)
+cost = paddle.nn.functional.cross_entropy(
+    input=prediction, label=label, reduction='none', use_softmax=False
+)
avg_cost = paddle.mean(x=cost)

return avg_cost
12 changes: 10 additions & 2 deletions python/paddle/fluid/tests/unittests/test_rnn_decode_api.py
@@ -304,7 +304,9 @@ def learn(self, act_prob, action, reward, length=None):
self.reward = paddle.static.py_func(
func=reward_func, x=[action, length], out=reward
)
-neg_log_prob = layers.cross_entropy(act_prob, action)
+neg_log_prob = paddle.nn.functional.cross_entropy(
+    act_prob, action, reduction='none', use_softmax=False
+)
cost = neg_log_prob * reward
cost = (
(paddle.sum(cost) / paddle.sum(length))
@@ -391,7 +393,13 @@ def __init__(self, lr=None):
self.lr = lr

def learn(self, probs, label, weight=None, length=None):
-loss = layers.cross_entropy(input=probs, label=label, soft_label=False)
+loss = paddle.nn.functional.cross_entropy(
+    input=probs,
+    label=label,
+    soft_label=False,
+    reduction='none',
+    use_softmax=False,
+)
max_seq_len = paddle.shape(probs)[1]
mask = layers.sequence_mask(length, maxlen=max_seq_len, dtype="float32")
loss = loss * mask
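In this file reduction='none' is load-bearing: learn() scales the per-step losses by a sequence mask before aggregating, which requires the unreduced [batch, max_len] loss rather than a scalar. A standalone sketch of that masking pattern with illustrative shapes; the mask is built here by broadcasting, equivalent in effect to the layers.sequence_mask call in the test:

    import paddle
    import paddle.nn.functional as F

    batch, max_len, num_classes = 4, 6, 10
    probs = F.softmax(paddle.rand([batch, max_len, num_classes]))
    label = paddle.randint(0, num_classes, [batch, max_len], dtype='int64')
    length = paddle.to_tensor([6, 3, 5, 2], dtype='int64')

    # Unreduced per-timestep loss, shape [batch, max_len].
    loss = F.cross_entropy(probs, label, reduction='none', use_softmax=False)

    # mask[i, t] is 1.0 while t < length[i], 0.0 on padded steps.
    steps = paddle.arange(max_len, dtype='int64')
    mask = (steps.unsqueeze(0) < length.unsqueeze(1)).astype('float32')

    # Zero out padded steps, then average over the real ones.
    avg_loss = paddle.sum(loss * mask) / paddle.sum(mask)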
4 changes: 3 additions & 1 deletion python/paddle/fluid/tests/unittests/test_trainable.py
@@ -27,7 +27,9 @@ def test_trainable():
feature = fluid.layers.fc(
input=x, size=10, param_attr=fluid.ParamAttr(trainable=False)
)
-loss = fluid.layers.cross_entropy(input=feature, label=label)
+loss = paddle.nn.functional.cross_entropy(
+    input=feature, label=label, reduction='none', use_softmax=False
+)
loss = paddle.mean(loss)
return loss

4 changes: 3 additions & 1 deletion python/paddle/fluid/tests/unittests/test_weight_decay.py
@@ -63,7 +63,9 @@ def bow_net(
fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
-cost = fluid.layers.cross_entropy(input=prediction, label=label)
+cost = paddle.nn.functional.cross_entropy(
+    input=prediction, label=label, reduction='none', use_softmax=False
+)
avg_cost = paddle.mean(x=cost)

return avg_cost