update deepfm network.

pull/7055/head
linqingke 5 years ago
parent ca90924fa4
commit 3ac12129a6

@@ -1,62 +1,55 @@
 # Copyright 2020 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ============================================================================
 """
 network config setting, will be used in train.py and eval.py
 """
 
 
 class DataConfig:
-    """
-    Define parameters of dataset.
-    """
+    """data config"""
     data_vocab_size = 184965
     train_num_of_parts = 21
     test_num_of_parts = 3
-    batch_size = 1000
+    batch_size = 16000
     data_field_size = 39
-    # dataset format, 1: mindrecord, 2: tfrecord, 3: h5
     data_format = 2
 
 
 class ModelConfig:
-    """
-    Define parameters of model.
-    """
+    """model config"""
     batch_size = DataConfig.batch_size
     data_field_size = DataConfig.data_field_size
     data_vocab_size = DataConfig.data_vocab_size
     data_emb_dim = 80
-    deep_layer_args = [[400, 400, 512], "relu"]
+    deep_layer_args = [[1024, 512, 256, 128], "relu"]
     init_args = [-0.01, 0.01]
     weight_bias_init = ['normal', 'normal']
     keep_prob = 0.9
 
 
 class TrainConfig:
-    """
-    Define parameters of training.
-    """
+    """train config"""
     batch_size = DataConfig.batch_size
-    l2_coef = 1e-6
-    learning_rate = 1e-5
-    epsilon = 1e-8
+    l2_coef = 8e-5
+    learning_rate = 5e-4
+    epsilon = 5e-8
     loss_scale = 1024.0
-    train_epochs = 15
+    train_epochs = 5
     save_checkpoint = True
     ckpt_file_name_prefix = "deepfm"
     save_checkpoint_steps = 1
-    keep_checkpoint_max = 15
+    keep_checkpoint_max = 50
     eval_callback = True
     loss_callback = True
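
The hunk above only changes hyperparameter values; the three classes remain plain attribute holders that train.py and eval.py read directly (e.g. ModelConfig.data_emb_dim), so no instance ever needs to be created. As a rough sketch of how the new ModelConfig values determine the deep tower's layer sizes, assuming a trimmed copy of the class and a hypothetical helper deep_layer_shapes that is not part of this commit:

# Illustrative sketch only (not code from this repository): how a training
# script might unpack the updated ModelConfig when sizing the deep part of
# DeepFM. The helper name and the trimmed config copy are assumptions.

class ModelConfig:
    # trimmed copy of the attributes used here, matching the new values above
    data_field_size = 39
    data_emb_dim = 80
    deep_layer_args = [[1024, 512, 256, 128], "relu"]
    keep_prob = 0.9


def deep_layer_shapes(cfg):
    """Return (in_dim, out_dim) pairs for the dense layers of the deep tower.

    The first dense layer consumes all field embeddings concatenated:
    data_field_size * data_emb_dim = 39 * 80 = 3120 inputs. A final
    single-unit layer is assumed here, since DeepFM typically sums the deep
    output with the FM term to produce one logit.
    """
    hidden_dims, activation = cfg.deep_layer_args
    in_dim = cfg.data_field_size * cfg.data_emb_dim
    shapes = []
    for out_dim in hidden_dims + [1]:
        shapes.append((in_dim, out_dim))
        in_dim = out_dim
    return shapes, activation


if __name__ == "__main__":
    shapes, act = deep_layer_shapes(ModelConfig)
    print(act, shapes)
    # relu [(3120, 1024), (1024, 512), (512, 256), (256, 128), (128, 1)]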

File diff suppressed because it is too large