TensorFlow provides a number of commonly used optimization methods for training networks, such as Adadelta, Adagrad, Adam, FTRL, Momentum, RMSProp, and SGD (gradient descent).

Each optimizer is constructed via the tf.train API as follows:

import tensorflow as tf

# Adadelta: adapts per-parameter learning rates using a decaying window of past gradients
optimizer = tf.train.AdadeltaOptimizer(learning_rate=0.001, 
                                       rho=0.95, 
                                       epsilon=1e-8,
                                       use_locking=False, 
                                       name="Adadelta")

# Adagrad: per-parameter learning rates scaled by the accumulated squared gradients
optimizer = tf.train.AdagradOptimizer(learning_rate=0.001, 
                                      initial_accumulator_value=0.1,
                                      use_locking=False, 
                                      name="Adagrad")

# Adam: adaptive moment estimation (exponential moving averages of the gradient and its square)
optimizer = tf.train.AdamOptimizer(learning_rate=0.001, 
                                   beta1=0.9, 
                                   beta2=0.999, 
                                   epsilon=1e-8,
                                   use_locking=False, 
                                   name="Adam")

# FTRL: Follow-The-Regularized-Leader, with built-in L1/L2 regularization (often used for sparse features)
optimizer = tf.train.FtrlOptimizer(learning_rate=0.001,
                                   learning_rate_power=-0.5,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.0,
                                   l2_regularization_strength=0.0,
                                   use_locking=False,
                                   name="Ftrl",
                                   accum_name=None,
                                   linear_name=None,
                                   l2_shrinkage_regularization_strength=0.0)

# Momentum: SGD with momentum; set use_nesterov=True for Nesterov momentum
optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, 
                                       momentum=0.9,
                                       use_locking=False, 
                                       name="Momentum", 
                                       use_nesterov=False)

# RMSProp: scales the learning rate by a moving average of squared gradients
optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001, 
                                      decay=0.9,
                                      momentum=0.0,
                                      epsilon=1e-10,
                                      use_locking=False,
                                      centered=False,
                                      name="RMSProp")

# SGD: plain (stochastic) gradient descent with a fixed learning rate
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001, 
                                              use_locking=False, 
                                              name="GradientDescent")