move batch_size from bert_net_cfg to cfg

pull/6233/head
yoonlee666 5 years ago
parent b4d527e198
commit 528072f45f

@@ -312,6 +312,7 @@ Parameters for training and evaluation can be set in file `config.py` and `finet
 ```
 config for lossscale and etc.
 bert_network                    version of BERT model: base | nezha, default is base
+batch_size                      batch size of input dataset: N, default is 16
 loss_scale_value                initial value of loss scale: N, default is 2^32
 scale_factor                    factor used to update loss scale: N, default is 2
 scale_window                    steps for once updatation of loss scale: N, default is 1000
@@ -321,7 +322,6 @@ config for lossscale and etc.
 ### Parameters:
 ```
 Parameters for dataset and network (Pre-Training/Fine-Tuning/Evaluation):
-batch_size                      batch size of input dataset: N, default is 16
 seq_length                      length of input sequence: N, default is 128
 vocab_size                      size of each embedding vector: N, must be consistant with the dataset you use. Default is 21136
 hidden_size                     size of bert encoder layers: N, default is 768
@@ -335,8 +335,6 @@ Parameters for dataset and network (Pre-Training/Fine-Tuning/Evaluation):
 type_vocab_size                 size of token type vocab: N, default is 16
 initializer_range               initialization value of TruncatedNormal: Q, default is 0.02
 use_relative_positions          use relative positions or not: True | False, default is False
-input_mask_from_dataset         use the input mask loaded form dataset or not: True | False, default is True
-token_type_ids_from_dataset     use the token type ids loaded from dataset or not: True | False, default is True
 dtype                           data type of input: mstype.float16 | mstype.float32, default is mstype.float32
 compute_type                    compute type in BertTransformer: mstype.float16 | mstype.float32, default is mstype.float16
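In short, `batch_size` becomes a training-time setting rather than a model-architecture field: the pre-training scripts read it from the `cfg` EasyDict and the finetune scripts from `optimizer_cfg`, while `BertConfig` keeps only network parameters. A minimal sketch of the new layout, with values mirroring the hunks in this commit (the real `BertConfig` is the MindSpore class; it is stubbed here only so the snippet runs standalone):

```python
# Sketch of the new config split: batch_size lives in cfg, not in BertConfig.
from easydict import EasyDict as edict

# Runtime/training settings (values mirror src/config.py after this commit).
cfg = edict({
    'batch_size': 32,
    'bert_network': 'base',
    'loss_scale_value': 65536,
    'scale_factor': 2,
    'scale_window': 1000,
})

# Stand-in for the MindSpore BertConfig: architecture-only fields, no batch_size.
class BertConfig:
    def __init__(self, seq_length=128, vocab_size=21128, hidden_size=768,
                 use_relative_positions=False):
        self.seq_length = seq_length
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.use_relative_positions = use_relative_positions

bert_net_cfg = BertConfig()

# Batching now happens on the dataset side, e.g.:
#   ds = ds.batch(cfg.batch_size, drop_remainder=True)
print(cfg.batch_size, bert_net_cfg.seq_length)  # 32 128
```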

@@ -19,7 +19,6 @@ from src.bert_model import BertModel
 from src.bert_model import BertConfig
 import mindspore.common.dtype as mstype
 bert_net_cfg_base = BertConfig(
-    batch_size=32,
     seq_length=128,
     vocab_size=21128,
     hidden_size=768,
@@ -33,13 +32,10 @@ bert_net_cfg_base = BertConfig(
     type_vocab_size=2,
     initializer_range=0.02,
     use_relative_positions=False,
-    input_mask_from_dataset=True,
-    token_type_ids_from_dataset=True,
     dtype=mstype.float32,
     compute_type=mstype.float16
 )
 bert_net_cfg_nezha = BertConfig(
-    batch_size=32,
     seq_length=128,
     vocab_size=21128,
     hidden_size=1024,
@@ -53,8 +49,6 @@ bert_net_cfg_nezha = BertConfig(
     type_vocab_size=2,
     initializer_range=0.02,
     use_relative_positions=True,
-    input_mask_from_dataset=True,
-    token_type_ids_from_dataset=True,
     dtype=mstype.float32,
     compute_type=mstype.float16
 )
@@ -63,15 +57,11 @@ def create_network(name, *args, **kwargs):
     Create bert network for base and nezha.
     '''
     if name == 'bert_base':
-        if "batch_size" in kwargs:
-            bert_net_cfg_base.batch_size = kwargs["batch_size"]
         if "seq_length" in kwargs:
             bert_net_cfg_base.seq_length = kwargs["seq_length"]
         is_training = kwargs.get("is_training", default=False)
         return BertModel(bert_net_cfg_base, is_training, *args)
     if name == 'bert_nezha':
-        if "batch_size" in kwargs:
-            bert_net_cfg_nezha.batch_size = kwargs["batch_size"]
         if "seq_length" in kwargs:
             bert_net_cfg_nezha.seq_length = kwargs["seq_length"]
         is_training = kwargs.get("is_training", default=False)

@@ -131,7 +131,7 @@ def bert_predict():
     '''
     devid = int(os.getenv('DEVICE_ID'))
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=devid)
-    dataset = get_enwiki_512_dataset(bert_net_cfg.batch_size, 1)
+    dataset = get_enwiki_512_dataset(cfg.batch_size, 1)
     net_for_pretraining = BertPretrainEva(bert_net_cfg)
     net_for_pretraining.set_train(False)
     param_dict = load_checkpoint(cfg.finetune_ckpt)

@@ -188,7 +188,7 @@ def run_classifier():
                          assessment_method=assessment_method)
     if args_opt.do_train.lower() == "true":
-        ds = create_classification_dataset(batch_size=bert_net_cfg.batch_size, repeat_count=1,
+        ds = create_classification_dataset(batch_size=optimizer_cfg.batch_size, repeat_count=1,
                                            assessment_method=assessment_method,
                                            data_file_path=args_opt.train_data_file_path,
                                            schema_file_path=args_opt.schema_file_path,
@@ -204,7 +204,7 @@ def run_classifier():
                     ds.get_dataset_size(), epoch_num, "classifier")
     if args_opt.do_eval.lower() == "true":
-        ds = create_classification_dataset(batch_size=bert_net_cfg.batch_size, repeat_count=1,
+        ds = create_classification_dataset(batch_size=optimizer_cfg.batch_size, repeat_count=1,
                                            assessment_method=assessment_method,
                                            data_file_path=args_opt.eval_data_file_path,
                                            schema_file_path=args_opt.schema_file_path,

@@ -104,9 +104,9 @@ def do_eval(dataset=None, network=None, use_crf="", num_class=2, assessment_meth
     if load_checkpoint_path == "":
         raise ValueError("Finetune model missed, evaluation task must load finetune model!")
     if assessment_method == "clue_benchmark":
-        bert_net_cfg.batch_size = 1
-    net_for_pretraining = network(bert_net_cfg, False, num_class, use_crf=(use_crf.lower() == "true"),
-                                  tag_to_index=tag_to_index)
+        optimizer_cfg.batch_size = 1
+    net_for_pretraining = network(bert_net_cfg, optimizer_cfg.batch_size, False, num_class,
+                                  use_crf=(use_crf.lower() == "true"), tag_to_index=tag_to_index)
     net_for_pretraining.set_train(False)
     param_dict = load_checkpoint(load_checkpoint_path)
     load_param_into_net(net_for_pretraining, param_dict)
@@ -211,11 +211,11 @@ def run_ner():
         number_labels = len(tag_to_index)
     else:
         number_labels = args_opt.num_class
-    netwithloss = BertNER(bert_net_cfg, True, num_labels=number_labels,
+    netwithloss = BertNER(bert_net_cfg, optimizer_cfg.batch_size, True, num_labels=number_labels,
                           use_crf=(args_opt.use_crf.lower() == "true"),
                           tag_to_index=tag_to_index, dropout_prob=0.1)
     if args_opt.do_train.lower() == "true":
-        ds = create_ner_dataset(batch_size=bert_net_cfg.batch_size, repeat_count=1,
+        ds = create_ner_dataset(batch_size=optimizer_cfg.batch_size, repeat_count=1,
                                 assessment_method=assessment_method, data_file_path=args_opt.train_data_file_path,
                                 schema_file_path=args_opt.schema_file_path,
                                 do_shuffle=(args_opt.train_data_shuffle.lower() == "true"))

@@ -107,7 +107,7 @@ def run_pretrain():
     if args_opt.accumulation_steps > 1:
         logger.info("accumulation steps: {}".format(args_opt.accumulation_steps))
-        logger.info("global batch size: {}".format(bert_net_cfg.batch_size * args_opt.accumulation_steps))
+        logger.info("global batch size: {}".format(cfg.batch_size * args_opt.accumulation_steps))
         if args_opt.enable_data_sink == "true":
             args_opt.data_sink_steps *= args_opt.accumulation_steps
             logger.info("data sink steps: {}".format(args_opt.data_sink_steps))

@@ -123,7 +123,7 @@ def do_eval(dataset=None, vocab_file="", eval_json="", load_checkpoint_path="",
         start = logits[1].asnumpy()
         end = logits[2].asnumpy()
-        for i in range(bert_net_cfg.batch_size):
+        for i in range(optimizer_cfg.batch_size):
             unique_id = int(ids[i])
             start_logits = [float(x) for x in start[i].flat]
             end_logits = [float(x) for x in end[i].flat]
@@ -193,7 +193,7 @@ def run_squad():
     netwithloss = BertSquad(bert_net_cfg, True, 2, dropout_prob=0.1)
     if args_opt.do_train.lower() == "true":
-        ds = create_squad_dataset(batch_size=bert_net_cfg.batch_size, repeat_count=1,
+        ds = create_squad_dataset(batch_size=optimizer_cfg.batch_size, repeat_count=1,
                                   data_file_path=args_opt.train_data_file_path,
                                   schema_file_path=args_opt.schema_file_path,
                                   do_shuffle=(args_opt.train_data_shuffle.lower() == "true"))
@@ -207,7 +207,7 @@ def run_squad():
                     ds.get_dataset_size(), epoch_num, "squad")
     if args_opt.do_eval.lower() == "true":
-        ds = create_squad_dataset(batch_size=bert_net_cfg.batch_size, repeat_count=1,
+        ds = create_squad_dataset(batch_size=optimizer_cfg.batch_size, repeat_count=1,
                                   data_file_path=args_opt.eval_data_file_path,
                                   schema_file_path=args_opt.schema_file_path, is_training=False,
                                   do_shuffle=(args_opt.eval_data_shuffle.lower() == "true"))

@@ -274,15 +274,15 @@ class BertNER(nn.Cell):
     """
     Train interface for sequence labeling finetuning task.
     """
-    def __init__(self, config, is_training, num_labels=11, use_crf=False, tag_to_index=None, dropout_prob=0.0,
-                 use_one_hot_embeddings=False):
+    def __init__(self, config, batch_size, is_training, num_labels=11, use_crf=False,
+                 tag_to_index=None, dropout_prob=0.0, use_one_hot_embeddings=False):
         super(BertNER, self).__init__()
         self.bert = BertNERModel(config, is_training, num_labels, use_crf, dropout_prob, use_one_hot_embeddings)
         if use_crf:
             if not tag_to_index:
                 raise Exception("The dict for tag-index mapping should be provided for CRF.")
             from src.CRF import CRF
-            self.loss = CRF(tag_to_index, config.batch_size, config.seq_length, is_training)
+            self.loss = CRF(tag_to_index, batch_size, config.seq_length, is_training)
         else:
             self.loss = CrossEntropyCalculation(is_training)
         self.num_labels = num_labels

@@ -92,9 +92,8 @@ class GetMaskedLMOutput(nn.Cell):
         self.matmul = P.MatMul(transpose_b=True)
         self.log_softmax = nn.LogSoftmax(axis=-1)
         self.shape_flat_offsets = (-1, 1)
-        self.rng = Tensor(np.array(range(0, config.batch_size)).astype(np.int32))
         self.last_idx = (-1,)
-        self.shape_flat_sequence_tensor = (config.batch_size * config.seq_length, self.width)
+        self.shape_flat_sequence_tensor = (-1, self.width)
         self.seq_length_tensor = Tensor(np.array((config.seq_length,)).astype(np.int32))
         self.cast = P.Cast()
         self.compute_type = config.compute_type
@@ -105,8 +104,8 @@ class GetMaskedLMOutput(nn.Cell):
                   output_weights,
                   positions):
         """Get output log_probs"""
-        flat_offsets = self.reshape(
-            self.rng * self.seq_length_tensor, self.shape_flat_offsets)
+        rng = F.tuple_to_array(F.make_range(P.Shape()(input_tensor)[0]))
+        flat_offsets = self.reshape(rng * self.seq_length_tensor, self.shape_flat_offsets)
         flat_position = self.reshape(positions + flat_offsets, self.last_idx)
         flat_sequence_tensor = self.reshape(input_tensor, self.shape_flat_sequence_tensor)
         input_tensor = self.gather(flat_sequence_tensor, flat_position, 0)
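The lines replaced here make the masked-LM gather independent of a compiled-in batch size: the row offsets are derived from the runtime batch dimension of `input_tensor`, and the flatten target uses `-1` for the leading dimension. A small NumPy illustration of the same index arithmetic (shapes are made up for the example):

```python
# NumPy illustration of the batch-size-agnostic masked-LM gather above.
# Shapes are illustrative only: batch=2, seq_length=4, width=3, 2 masked positions per sample.
import numpy as np

batch, seq_length, width = 2, 4, 3
input_tensor = np.arange(batch * seq_length * width).reshape(batch, seq_length, width)
positions = np.array([[1, 3],
                      [0, 2]])               # masked token positions within each sequence

rng = np.arange(batch)                                  # analogue of F.make_range(P.Shape()(input_tensor)[0])
flat_offsets = (rng * seq_length).reshape(-1, 1)        # row offset of each sample
flat_position = (positions + flat_offsets).reshape(-1)  # indices into the flattened sequence axis
flat_sequence_tensor = input_tensor.reshape(-1, width)  # (-1, width): no fixed batch size needed
gathered = flat_sequence_tensor[flat_position]          # (batch * num_positions, width)

print(gathered.shape)   # (4, 3)
```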

File diff suppressed because it is too large

@@ -19,6 +19,7 @@ from easydict import EasyDict as edict
 import mindspore.common.dtype as mstype
 from .bert_model import BertConfig
 cfg = edict({
+    'batch_size': 32,
     'bert_network': 'base',
     'loss_scale_value': 65536,
     'scale_factor': 2,
@@ -57,7 +58,6 @@ large: BERT-NEZHA(a Chinese pretrained language model developed by Huawei, which
 '''
 if cfg.bert_network == 'base':
     bert_net_cfg = BertConfig(
-        batch_size=64,
         seq_length=128,
         vocab_size=21128,
         hidden_size=768,
@@ -71,14 +71,11 @@ if cfg.bert_network == 'base':
         type_vocab_size=2,
         initializer_range=0.02,
         use_relative_positions=False,
-        input_mask_from_dataset=True,
-        token_type_ids_from_dataset=True,
         dtype=mstype.float32,
         compute_type=mstype.float16
     )
 if cfg.bert_network == 'nezha':
     bert_net_cfg = BertConfig(
-        batch_size=96,
         seq_length=128,
         vocab_size=21128,
         hidden_size=1024,
@@ -92,14 +89,11 @@ if cfg.bert_network == 'nezha':
         type_vocab_size=2,
         initializer_range=0.02,
         use_relative_positions=True,
-        input_mask_from_dataset=True,
-        token_type_ids_from_dataset=True,
         dtype=mstype.float32,
         compute_type=mstype.float16
     )
 if cfg.bert_network == 'large':
     bert_net_cfg = BertConfig(
-        batch_size=24,
         seq_length=512,
         vocab_size=30522,
         hidden_size=1024,
@@ -113,8 +107,6 @@ if cfg.bert_network == 'large':
         type_vocab_size=2,
         initializer_range=0.02,
         use_relative_positions=False,
-        input_mask_from_dataset=True,
-        token_type_ids_from_dataset=True,
         dtype=mstype.float32,
         compute_type=mstype.float16
     )

@@ -20,7 +20,7 @@ import mindspore.common.dtype as mstype
 import mindspore.dataset.engine.datasets as de
 import mindspore.dataset.transforms.c_transforms as C
 from mindspore import log as logger
-from .config import bert_net_cfg
+from .config import cfg
 def create_bert_dataset(device_num=1, rank=0, do_shuffle="true", data_dir=None, schema_dir=None):
@@ -46,7 +46,7 @@ def create_bert_dataset(device_num=1, rank=0, do_shuffle="true", data_dir=None,
     ds = ds.map(operations=type_cast_op, input_columns="input_mask")
     ds = ds.map(operations=type_cast_op, input_columns="input_ids")
     # apply batch operations
-    ds = ds.batch(bert_net_cfg.batch_size, drop_remainder=True)
+    ds = ds.batch(cfg.batch_size, drop_remainder=True)
     logger.info("data size: {}".format(ds.get_dataset_size()))
     logger.info("repeat count: {}".format(ds.get_repeat_count()))
     return ds

@@ -22,6 +22,7 @@ import mindspore.common.dtype as mstype
 from .bert_model import BertConfig
 optimizer_cfg = edict({
+    'batch_size': 16,
     'optimizer': 'Lamb',
     'AdamWeightDecay': edict({
         'learning_rate': 2e-5,
@@ -45,7 +46,6 @@ optimizer_cfg = edict({
 })
 bert_net_cfg = BertConfig(
-    batch_size=16,
     seq_length=128,
     vocab_size=21128,
     hidden_size=768,
@@ -59,8 +59,6 @@ bert_net_cfg = BertConfig(
     type_vocab_size=2,
     initializer_range=0.02,
     use_relative_positions=False,
-    input_mask_from_dataset=True,
-    token_type_ids_from_dataset=True,
     dtype=mstype.float32,
     compute_type=mstype.float16,
 )
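The finetune/eval scripts therefore read the batch size from `optimizer_cfg`, pass it to the dataset pipeline, and hand it to the NER network for the CRF loss. Because `optimizer_cfg` is an EasyDict, the clue_benchmark evaluation path can simply overwrite the value at runtime, as in the `do_eval` hunk above. A minimal, self-contained sketch of that pattern (the `create_ner_dataset` call is shown only as a comment):

```python
# Sketch: optimizer_cfg is an EasyDict, so batch_size is a plain mutable attribute
# that scripts read and, for clue_benchmark evaluation, override.
from easydict import EasyDict as edict

optimizer_cfg = edict({'batch_size': 16, 'optimizer': 'Lamb'})

# Dataset creation in the finetune scripts would use it like:
#   ds = create_ner_dataset(batch_size=optimizer_cfg.batch_size, repeat_count=1, ...)
assessment_method = "clue_benchmark"        # hypothetical CLI value
if assessment_method == "clue_benchmark":
    optimizer_cfg.batch_size = 1            # mirrors the do_eval override above

print(optimizer_cfg.batch_size)             # 1
```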

@@ -107,7 +107,7 @@ class BertNERModel(nn.Cell):
         self.reshape = P.Reshape()
         self.shape = (-1, config.hidden_size)
         self.use_crf = use_crf
-        self.origin_shape = (config.batch_size, config.seq_length, self.num_labels)
+        self.origin_shape = (-1, config.seq_length, self.num_labels)
     def construct(self, input_ids, input_mask, token_type_id):
         """Return the final logits as the results of log_softmax."""

@@ -41,11 +41,10 @@ DATA_DIR = ["/home/workspace/mindspore_dataset/bert/example/examples.tfrecord"]
 SCHEMA_DIR = "/home/workspace/mindspore_dataset/bert/example/datasetSchema.json"
-def get_config(version='base', batch_size=1):
+def get_config(version='base'):
     """get config"""
     if version == 'base':
         bert_config = BertConfig(
-            batch_size=batch_size,
             seq_length=128,
             vocab_size=21136,
             hidden_size=768,
@@ -59,13 +58,10 @@ def get_config(version='base', batch_size=1):
             type_vocab_size=2,
             initializer_range=0.02,
             use_relative_positions=True,
-            input_mask_from_dataset=True,
-            token_type_ids_from_dataset=True,
             dtype=mstype.float32,
             compute_type=mstype.float32)
     elif version == 'large':
         bert_config = BertConfig(
-            batch_size=batch_size,
             seq_length=128,
             vocab_size=21136,
             hidden_size=1024,
@@ -79,12 +75,10 @@ def get_config(version='base', batch_size=1):
             type_vocab_size=2,
             initializer_range=0.02,
             use_relative_positions=False,
-            input_mask_from_dataset=True,
-            token_type_ids_from_dataset=True,
             dtype=mstype.float32,
             compute_type=mstype.float16)
     else:
-        bert_config = BertConfig(batch_size=batch_size)
+        bert_config = BertConfig()
     return bert_config
@@ -186,8 +180,7 @@ def test_bert_performance():
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", reserve_class_name_in_scope=False)
     ds, new_repeat_count, sink_size = me_de_train_dataset(sink_mode=True)
     version = os.getenv('VERSION', 'large')
-    batch_size = 16
-    config = get_config(version=version, batch_size=batch_size)
+    config = get_config(version=version)
     netwithloss = BertNetworkWithLoss(config, True)
     lr = BertLearningRate(decay_steps=sink_size * new_repeat_count,

@@ -41,11 +41,10 @@ DATA_DIR = ["/home/workspace/mindspore_dataset/bert/example/examples.tfrecord"]
 SCHEMA_DIR = "/home/workspace/mindspore_dataset/bert/example/datasetSchema.json"
-def get_config(version='base', batch_size=1):
+def get_config(version='base'):
     """get config"""
     if version == 'base':
         bert_config = BertConfig(
-            batch_size=batch_size,
             seq_length=128,
             vocab_size=21136,
             hidden_size=768,
@@ -59,13 +58,10 @@ def get_config(version='base', batch_size=1):
             type_vocab_size=2,
             initializer_range=0.02,
             use_relative_positions=True,
-            input_mask_from_dataset=True,
-            token_type_ids_from_dataset=True,
             dtype=mstype.float32,
             compute_type=mstype.float32)
     elif version == 'large':
         bert_config = BertConfig(
-            batch_size=batch_size,
             seq_length=128,
             vocab_size=21136,
             hidden_size=1024,
@@ -79,12 +75,10 @@ def get_config(version='base', batch_size=1):
             type_vocab_size=2,
             initializer_range=0.02,
             use_relative_positions=False,
-            input_mask_from_dataset=True,
-            token_type_ids_from_dataset=True,
             dtype=mstype.float32,
             compute_type=mstype.float16)
     else:
-        bert_config = BertConfig(batch_size=batch_size)
+        bert_config = BertConfig()
     return bert_config
@@ -185,8 +179,7 @@ def test_bert_percision():
     context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", reserve_class_name_in_scope=False)
     ds, new_repeat_count, _ = me_de_train_dataset()
     version = os.getenv('VERSION', 'large')
-    batch_size = 16
-    config = get_config(version=version, batch_size=batch_size)
+    config = get_config(version=version)
     netwithloss = BertNetworkWithLoss(config, True)
     lr = BertLearningRate(decay_steps=ds.get_dataset_size()*new_repeat_count,
                           learning_rate=5e-5, end_learning_rate=1e-9,
