forked from tensorflow/models
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathoptimizer.proto
73 lines (63 loc) · 2.43 KB
/
optimizer.proto
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
syntax = "proto2";
package object_detection.protos;
// Messages for configuring the optimizing strategy for training object
// detection models.
// Top level optimizer message.
message Optimizer {
  // Exactly one concrete optimizer must be chosen.
  oneof optimizer {
    RMSPropOptimizer rms_prop_optimizer = 1;
    MomentumOptimizer momentum_optimizer = 2;
    AdamOptimizer adam_optimizer = 3;
  }
  // Whether to keep an exponential moving average of the model variables
  // (presumably applied via tf.train.ExponentialMovingAverage for
  // eval/export — confirm in the trainer/optimizer builder).
  optional bool use_moving_average = 4 [default=true];
  // Decay rate for the moving average; only meaningful when
  // use_moving_average is true.
  optional float moving_average_decay = 5 [default=0.9999];
}
// Configuration message for the RMSPropOptimizer
// See: https://www.tensorflow.org/api_docs/python/tf/train/RMSPropOptimizer
message RMSPropOptimizer {
  // Learning rate (constant or schedule) used by the optimizer.
  optional LearningRate learning_rate = 1;
  // Momentum term; presumably mapped to tf.train.RMSPropOptimizer's
  // `momentum` argument — confirm in the optimizer builder.
  optional float momentum_optimizer_value = 2 [default=0.9];
  // Discounting factor for the gradient history (RMSProp `decay`).
  optional float decay = 3 [default=0.9];
  // Small value added for numerical stability. NOTE(review): 1.0 is far
  // larger than tf.train.RMSPropOptimizer's own default of 1e-10; this is
  // deliberate in the detection configs, so do not "fix" it silently.
  optional float epsilon = 4 [default=1.0];
}
// Configuration message for the MomentumOptimizer
// See: https://www.tensorflow.org/api_docs/python/tf/train/MomentumOptimizer
message MomentumOptimizer {
  // Learning rate (constant or schedule) used by the optimizer.
  optional LearningRate learning_rate = 1;
  // Momentum term; presumably mapped to tf.train.MomentumOptimizer's
  // `momentum` argument — confirm in the optimizer builder.
  optional float momentum_optimizer_value = 2 [default=0.9];
}
// Configuration message for the AdamOptimizer
// See: https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer
message AdamOptimizer {
  // Learning rate (constant or schedule) used by the optimizer. Adam's
  // remaining hyperparameters (beta1, beta2, epsilon) are not exposed
  // here, so the TensorFlow defaults presumably apply — confirm in the
  // optimizer builder.
  optional LearningRate learning_rate = 1;
}
// Configuration message for optimizer learning rate.
message LearningRate {
  // Exactly one learning rate strategy must be chosen.
  oneof learning_rate {
    ConstantLearningRate constant_learning_rate = 1;
    ExponentialDecayLearningRate exponential_decay_learning_rate = 2;
    ManualStepLearningRate manual_step_learning_rate = 3;
  }
}
// Configuration message for a constant learning rate.
message ConstantLearningRate {
  // Fixed learning rate used for the entire training run.
  optional float learning_rate = 1 [default=0.002];
}
// Configuration message for an exponentially decaying learning rate.
// See https://www.tensorflow.org/versions/master/api_docs/python/train/ \
// decaying_the_learning_rate#exponential_decay
message ExponentialDecayLearningRate {
  // Learning rate at step 0.
  optional float initial_learning_rate = 1 [default=0.002];
  // Number of steps over which the rate decays by one factor of
  // decay_factor (the `decay_steps` argument of
  // tf.train.exponential_decay).
  optional uint32 decay_steps = 2 [default=4000000];
  // Multiplicative decay applied every decay_steps steps.
  optional float decay_factor = 3 [default=0.95];
  // If true, decay at discrete intervals (step-function) rather than
  // continuously.
  optional bool staircase = 4 [default=true];
}
// Configuration message for a manually defined learning rate schedule.
message ManualStepLearningRate {
  // Learning rate in effect before the first schedule entry applies.
  optional float initial_learning_rate = 1 [default=0.002];
  // One boundary of the piecewise-constant schedule: from `step` onward
  // the learning rate becomes `learning_rate` (presumably until the next
  // entry takes over — confirm against the schedule-building code).
  message LearningRateSchedule {
    // Global step at which this entry's rate starts to apply.
    optional uint32 step = 1;
    // Learning rate used from `step` onward.
    optional float learning_rate = 2 [default=0.002];
  }
  // Schedule entries; presumably expected in increasing `step` order —
  // confirm against the schedule-building code.
  repeated LearningRateSchedule schedule = 2;
}