Update 'AdamOptimizer' to 'optimizers.Adam'. #70

Merged
merged 1 commit on Sep 3, 2021
@@ -612,7 +612,7 @@
"\n",
"The Adagrad optimizer is one alternative. The key insight of Adagrad is that it modifies the learning rate adaptively for each coefficient in a model, monotonically lowering the effective learning rate. This works great for convex problems, but isn't always ideal for the non-convex problem Neural Net training. You can use Adagrad by specifying `AdagradOptimizer` instead of `GradientDescentOptimizer`. Note that you may need to use a larger learning rate with Adagrad.\n",
"\n",
"For non-convex optimization problems, Adam is sometimes more efficient than Adagrad. To use Adam, invoke the `tf.train.AdamOptimizer` method. This method takes several optional hyperparameters as arguments, but our solution only specifies one of these (`learning_rate`). In a production setting, you should specify and tune the optional hyperparameters carefully."
"For non-convex optimization problems, Adam is sometimes more efficient than Adagrad. To use Adam, invoke the `tf.optimizers.Adam` method. This method takes several optional hyperparameters as arguments, but our solution only specifies one of these (`learning_rate`). In a production setting, you should specify and tune the optional hyperparameters carefully."
]
},
{
@@ -709,7 +709,7 @@
"outputs": [],
"source": [
"_, adam_training_losses, adam_validation_losses = train_nn_regression_model(\n",
" my_optimizer=tf.train.AdamOptimizer(learning_rate=0.009),\n",
" my_optimizer=tf.optimizers.Adam(learning_rate=0.009),\n",
" steps=500,\n",
" batch_size=100,\n",
" hidden_units=[10, 10],\n",
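For context, a minimal TF2 sketch of the API this PR moves to: `tf.optimizers.Adam` replaces the TF1-era `tf.train.AdamOptimizer`. The toy data, model shape, and learning rate below are illustrative only, not taken from the notebooks in this PR.

```python
# Minimal sketch (illustrative values, not from the notebooks):
# tf.optimizers.Adam is the TF2 replacement for tf.train.AdamOptimizer.
import numpy as np
import tensorflow as tf
from tensorflow import keras

features = np.array([[0.0], [1.0], [2.0], [3.0]])  # toy inputs
labels = np.array([[1.0], [3.0], [5.0], [7.0]])    # toy targets (y = 2x + 1)

model = keras.Sequential([
    keras.layers.Dense(1, activation='linear', input_dim=1)
])
# Adam accepts several optional hyperparameters; only learning_rate is set here.
model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.009), loss='mse')
model.fit(features, labels, epochs=5, batch_size=1, verbose=0)
```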
6 changes: 3 additions & 3 deletions ml/testing-debugging/testing-debugging-ml-debugging.ipynb
@@ -235,7 +235,7 @@
"\n",
"# Model calculates loss using mean-square error (MSE)\n",
"# Model trains using Adam optimizer with learning rate = 0.001\n",
"model.compile(optimizer=tf.train.AdamOptimizer(0.001),\n",
"model.compile(optimizer=tf.optimizers.Adam(0.001),\n",
" loss='mse',\n",
" )\n",
"\n",
@@ -327,7 +327,7 @@
"model = None\n",
"model = keras.Sequential()\n",
"model.add(keras.layers.Dense(1, activation='linear', input_dim=1))\n",
"model.compile(optimizer=tf.train.AdamOptimizer(0.001), loss='mse')\n",
"model.compile(optimizer=tf.optimizers.Adam(0.001), loss='mse')\n",
"trainHistory = model.fit(features, labels, epochs=10, batch_size=1, verbose=1)\n",
"# Plot loss curve\n",
"plt.plot(trainHistory.history['loss'])\n",
@@ -377,7 +377,7 @@
"model = None\n",
"model = keras.Sequential()\n",
"model.add(keras.layers.Dense(1, activation='linear', input_dim=1))\n",
"model.compile(optimizer=tf.train.AdamOptimizer(0.1), loss='mse')\n",
"model.compile(optimizer=tf.optimizers.Adam(0.1), loss='mse')\n",
"model.fit(features, labels, epochs=5, batch_size=1, verbose=1)"
],
"execution_count": 0,
22 changes: 11 additions & 11 deletions ml/testing-debugging/testing-debugging-regression.ipynb
@@ -750,7 +750,7 @@
"model = keras.Sequential()\n",
"model.add(keras.layers.Dense(units=1, activation='linear', input_dim=1))\n",
"# Specify the optimizer using the TF API to specify the learning rate\n",
"model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.01),\n",
"model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01),\n",
" loss='mse')\n",
"# Train the model!\n",
"trainHistory = model.fit(wineFeaturesSimple,\n",
@@ -826,7 +826,7 @@
"model = keras.Sequential()\n",
"model.add(keras.layers.Dense(units=1, activation='linear', input_dim=1))\n",
"# Specify the optimizer using the TF API to specify the learning rate\n",
"model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.01),\n",
"model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01),\n",
" loss='mse')\n",
"# Train the model!\n",
"trainHistory = model.fit(wineFeaturesSimple,\n",
@@ -877,7 +877,7 @@
" activation='linear'))\n",
"model.add(...) # add second layer\n",
"# Compile\n",
"model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=), loss='mse')\n",
"model.compile(optimizer=tf.optimizers.Adam(learning_rate=), loss='mse')\n",
"# Train\n",
"trainHistory = model.fit(wineFeaturesSimple,\n",
" wineLabels,\n",
@@ -929,7 +929,7 @@
" activation='linear'))\n",
"model.add(keras.layers.Dense(1, activation='linear')) # add second layer\n",
"# Compile\n",
"model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.01), loss='mse')\n",
"model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01), loss='mse')\n",
"# Train\n",
"trainHistory = model.fit(wineFeaturesSimple,\n",
" wineLabels,\n",
@@ -976,7 +976,7 @@
" activation=))\n",
"model.add(keras.layers.Dense(1, activation='linear'))\n",
"# Compile\n",
"model.compile(optimizer=tf.train.AdamOptimizer(), loss='mse')\n",
"model.compile(optimizer=tf.optimizers.Adam(), loss='mse')\n",
"# Fit\n",
"model.fit(wineFeaturesSimple,\n",
" wineLabels,\n",
@@ -1026,7 +1026,7 @@
" activation='relu'))\n",
"model.add(keras.layers.Dense(1, activation='linear'))\n",
"# Compile\n",
"model.compile(optimizer=tf.train.AdamOptimizer(), loss='mse')\n",
"model.compile(optimizer=tf.optimizers.Adam(), loss='mse')\n",
"# Fit\n",
"model.fit(wineFeaturesSimple,\n",
" wineLabels,\n",
@@ -1071,7 +1071,7 @@
"# Add more layers here\n",
"model.add(keras.layers.Dense(1,activation='linear'))\n",
"# Compile\n",
"model.compile(optimizer=tf.train.AdamOptimizer(), loss='mse')\n",
"model.compile(optimizer=tf.optimizers.Adam(), loss='mse')\n",
"# Train\n",
"trainHistory = model.fit(wineFeaturesSimple,\n",
" wineLabels,\n",
@@ -1150,7 +1150,7 @@
"# Add more layers here\n",
"model.add(keras.layers.Dense(1,activation='linear'))\n",
"# Compile\n",
"model.compile(optimizer=tf.train.AdamOptimizer(), loss='mse')\n",
"model.compile(optimizer=tf.optimizers.Adam(), loss='mse')\n",
"# Train\n",
"trainHistory = model.fit(wineFeaturesSimple,\n",
" wineLabels,\n",
@@ -1198,7 +1198,7 @@
"model.add(keras.layers.Dense(wineFeaturesSmall.shape[1], activation='relu'))\n",
"model.add(keras.layers.Dense(1, activation='linear'))\n",
"# Compile\n",
"model.compile(optimizer=tf.train.AdamOptimizer(), loss='mse') # set LR\n",
"model.compile(optimizer=tf.optimizers.Adam(), loss='mse') # set LR\n",
"# Train\n",
"trainHistory = model.fit(wineFeaturesSmall,\n",
" wineLabelsSmall,\n",
@@ -1257,7 +1257,7 @@
"model.add(keras.layers.Dense(wineFeaturesSmall.shape[1], activation='relu'))\n",
"model.add(keras.layers.Dense(1, activation='linear'))\n",
"# Compile\n",
"model.compile(optimizer=tf.train.AdamOptimizer(0.01), loss='mse') # set LR\n",
"model.compile(optimizer=tf.optimizers.Adam(0.01), loss='mse') # set LR\n",
"# Train\n",
"trainHistory = model.fit(wineFeaturesSmall,\n",
" wineLabelsSmall,\n",
@@ -1308,7 +1308,7 @@
"model.add(keras.layers.Dense(wineFeatures.shape[1], activation='relu'))\n",
"model.add(keras.layers.Dense(1,activation='linear'))\n",
"# Compile\n",
"model.compile(optimizer=tf.train.AdamOptimizer(), loss='mse')\n",
"model.compile(optimizer=tf.optimizers.Adam(), loss='mse')\n",
"# Train the model!\n",
"trainHistory = model.fit(wineFeatures, wineLabels, epochs=100, batch_size=100,\n",
" verbose=1, validation_split = 0.2)\n",