diff --git a/model_zoo/official/cv/cnnctc/README_CN.md b/model_zoo/official/cv/cnnctc/README_CN.md
index 0c0450fd22..35b8900e50 100644
--- a/model_zoo/official/cv/cnnctc/README_CN.md
+++ b/model_zoo/official/cv/cnnctc/README_CN.md
@@ -261,7 +261,7 @@ bash scripts/run_eval_ascend.sh $TRAINED_CKPT
 
 ### 训练性能
 
-| 参数 | Faster R-CNN |
+| 参数 | CNNCTC |
 | -------------------------- | ----------------------------------------------------------- |
 | 模型版本 | V1 |
 | 资源 | Ascend 910;CPU 2.60GHz,192核;内存:755G |
@@ -278,7 +278,7 @@ bash scripts/run_eval_ascend.sh $TRAINED_CKPT
 
 ### 评估性能
 
-| 参数 | Faster R-CNN |
+| 参数 | CNNCTC |
 | ------------------- | --------------------------- |
 | 模型版本 | V1 |
 | 资源 | Ascend 910 |
diff --git a/model_zoo/official/cv/densenet121/scripts/run_distribute_eval.sh b/model_zoo/official/cv/densenet121/scripts/run_distribute_eval.sh
index 21e2761cfb..74c52226e2 100644
--- a/model_zoo/official/cv/densenet121/scripts/run_distribute_eval.sh
+++ b/model_zoo/official/cv/densenet121/scripts/run_distribute_eval.sh
@@ -15,13 +15,13 @@
 # ============================================================================
 
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "sh scripts/run_distribute_eval.sh DEVICE_NUM RANK_TABLE_FILE DATASET CKPT_PATH"
 echo "for example: sh scripts/run_distribute_train.sh 8 /data/hccl.json /path/to/dataset /path/to/ckpt"
 echo "It is better to use absolute path."
 echo "================================================================================================================="
-echo "After running the scipt, the network runs in the background. The log will be generated in eval_x/log.txt"
+echo "After running the script, the network runs in the background. The log will be generated in eval_x/log.txt"
 
 export RANK_SIZE=$1
 export RANK_TABLE_FILE=$2
@@ -37,7 +37,7 @@ do
     cp -r ./src ./eval_$i
     cd ./eval_$i || exit
     export RANK_ID=$i
-    echo "start infering for rank $i, device $DEVICE_ID"
+    echo "start inferring for rank $i, device $DEVICE_ID"
     env > env.log
     python eval.py \
         --data_dir=$DATASET \
diff --git a/model_zoo/official/cv/densenet121/scripts/run_distribute_train.sh b/model_zoo/official/cv/densenet121/scripts/run_distribute_train.sh
index 51f16649b1..0a597c11d5 100644
--- a/model_zoo/official/cv/densenet121/scripts/run_distribute_train.sh
+++ b/model_zoo/official/cv/densenet121/scripts/run_distribute_train.sh
@@ -15,13 +15,13 @@
 # ============================================================================
 
 echo "=============================================================================================================="
-echo "Please run the scipt as: "
+echo "Please run the script as: "
 echo "sh scripts/run_distribute_train.sh DEVICE_NUM RANK_TABLE_FILE DATASET CKPT_FILE"
 echo "for example: sh scripts/run_distribute_train.sh 8 /data/hccl.json /path/to/dataset ckpt_file"
 echo "It is better to use absolute path."
 echo "================================================================================================================="
-echo "After running the scipt, the network runs in the background. The log will be generated in train_x/log.txt"
+echo "After running the script, the network runs in the background. The log will be generated in train_x/log.txt"
 
 export RANK_SIZE=$1
 export RANK_TABLE_FILE=$2
diff --git a/model_zoo/official/cv/densenet121/src/network/densenet.py b/model_zoo/official/cv/densenet121/src/network/densenet.py
index 42a0665a8e..27708faf73 100644
--- a/model_zoo/official/cv/densenet121/src/network/densenet.py
+++ b/model_zoo/official/cv/densenet121/src/network/densenet.py
@@ -121,7 +121,7 @@ class _DenseBlock(nn.Cell):
 
 class _Transition(nn.Cell):
     """
-    the transiton layer
+    the transition layer
     """
     def __init__(self, num_input_features, num_output_features):
         super(_Transition, self).__init__()
@@ -203,7 +203,7 @@ def _densenet201(**kwargs):
 
 class DenseNet121(nn.Cell):
     """
-    the densenet121 architectur
+    the densenet121 architecture
     """
     def __init__(self, num_classes, include_top=True):
         super(DenseNet121, self).__init__()
diff --git a/model_zoo/official/cv/inceptionv4/README.md b/model_zoo/official/cv/inceptionv4/README.md
index 2522f842f7..aed4fb7cc6 100644
--- a/model_zoo/official/cv/inceptionv4/README.md
+++ b/model_zoo/official/cv/inceptionv4/README.md
@@ -139,7 +139,7 @@ sh scripts/run_standalone_train_ascend.sh DEVICE_ID DATA_DIR
 
 ### Result
 
-Training result will be stored in the example path. Checkpoints will be stored at `ckpt_path` by default, and training log will be redirected to `./log.txt` like followings.
+Training result will be stored in the example path. Checkpoints will be stored at `ckpt_path` by default, and training log will be redirected to `./log.txt` like following.
 
 ```python
 epoch: 1 step: 1251, loss is 5.4833196
@@ -175,7 +175,7 @@ You can start training using python or shell scripts. The usage of shell scripts
 
 ### Result
 
-Evaluation result will be stored in the example path, you can find result like the followings in `eval.log`.
+Evaluation result will be stored in the example path, you can find result like the following in `eval.log`.
 
 ```python
 metric: {'Loss': 0.9849, 'Top1-Acc':0.7985, 'Top5-Acc':0.9460}
diff --git a/model_zoo/official/cv/yolov4/eval.py b/model_zoo/official/cv/yolov4/eval.py
index c6f4b0679c..5f4aa24e3b 100644
--- a/model_zoo/official/cv/yolov4/eval.py
+++ b/model_zoo/official/cv/yolov4/eval.py
@@ -115,7 +115,7 @@ class DetectionEngine:
 
     def _nms(self, predicts, threshold):
         """Calculate NMS."""
-        # conver xywh -> xmin ymin xmax ymax
+        # convert xywh -> xmin ymin xmax ymax
         x1 = predicts[:, 0]
         y1 = predicts[:, 1]
         x2 = x1 + predicts[:, 2]
@@ -139,13 +139,13 @@ class DetectionEngine:
             intersect_area = intersect_w * intersect_h
             ovr = intersect_area / (areas[i] + areas[order[1:]] - intersect_area)
 
-            indexs = np.where(ovr <= threshold)[0]
-            order = order[indexs + 1]
+            indexes = np.where(ovr <= threshold)[0]
+            order = order[indexes + 1]
 
         return reserved_boxes
 
     def _diou_nms(self, dets, thresh=0.5):
         """
-        conver xywh -> xmin ymin xmax ymax
+        convert xywh -> xmin ymin xmax ymax
         """
         x1 = dets[:, 0]
         y1 = dets[:, 1]
@@ -248,7 +248,7 @@ class DetectionEngine:
             x_top_left = x - w / 2.
             y_top_left = y - h / 2.
 
-            # creat all False
+            # create all False
             flag = np.random.random(cls_emb.shape) > sys.maxsize
             for i in range(flag.shape[0]):
                 c = cls_argmax[i]
diff --git a/model_zoo/official/cv/yolov4/scripts/run_eval.sh b/model_zoo/official/cv/yolov4/scripts/run_eval.sh
index ff6b79e516..0ac33dc60b 100644
--- a/model_zoo/official/cv/yolov4/scripts/run_eval.sh
+++ b/model_zoo/official/cv/yolov4/scripts/run_eval.sh
@@ -58,7 +58,7 @@ cp ../*.py ./eval
 cp -r ../src ./eval
 cd ./eval || exit
 env > env.log
-echo "start infering for device $DEVICE_ID"
+echo "start inferring for device $DEVICE_ID"
 python eval.py \
     --data_dir=$DATASET_PATH \
     --pretrained=$CHECKPOINT_PATH \
diff --git a/model_zoo/official/cv/yolov4/scripts/run_test.sh b/model_zoo/official/cv/yolov4/scripts/run_test.sh
index d0eaa616ad..ad195423cc 100644
--- a/model_zoo/official/cv/yolov4/scripts/run_test.sh
+++ b/model_zoo/official/cv/yolov4/scripts/run_test.sh
@@ -58,7 +58,7 @@ cp ../*.py ./test
 cp -r ../src ./test
 cd ./test || exit
 env > env.log
-echo "start infering for device $DEVICE_ID"
+echo "start inferring for device $DEVICE_ID"
 python test.py \
     --data_dir=$DATASET_PATH \
     --pretrained=$CHECKPOINT_PATH \
diff --git a/model_zoo/official/cv/yolov4/src/yolo.py b/model_zoo/official/cv/yolov4/src/yolo.py
index 0cb0bf2fcf..b9c9c422df 100644
--- a/model_zoo/official/cv/yolov4/src/yolo.py
+++ b/model_zoo/official/cv/yolov4/src/yolo.py
@@ -59,7 +59,7 @@ class YoloBlock(nn.Cell):
     Args:
         in_channels: Integer. Input channel.
-        out_chls: Interger. Middle channel.
+        out_chls: Integer. Middle channel.
         out_channels: Integer. Output channel.
 
     Returns:
         Tensor, output tensor.
@@ -111,7 +111,7 @@ class YOLOv4(nn.Cell):
         feature_shape: List. Input image shape, [N,C,H,W].
         backbone_shape: List. Darknet output channels shape.
         backbone: Cell. Backbone Network.
-        out_channel: Interger. Output channel.
+        out_channel: Integer. Output channel.
 
     Returns:
         Tensor, output tensor.
diff --git a/model_zoo/official/cv/yolov4/src/yolo_dataset.py b/model_zoo/official/cv/yolov4/src/yolo_dataset.py
index dcc7748d74..73484f6fb8 100644
--- a/model_zoo/official/cv/yolov4/src/yolo_dataset.py
+++ b/model_zoo/official/cv/yolov4/src/yolo_dataset.py
@@ -45,7 +45,7 @@ def has_valid_annotation(anno):
     # if all boxes have close to zero area, there is no annotation
     if _has_only_empty_bbox(anno):
         return False
-    # keypoints task have a slight different critera for considering
+    # keypoints task have a slight different criteria for considering
     # if an annotation is valid
     if "keypoints" not in anno[0]:
         return True
diff --git a/model_zoo/official/cv/yolov4/test.py b/model_zoo/official/cv/yolov4/test.py
index fc7f3025b3..b3048221b2 100644
--- a/model_zoo/official/cv/yolov4/test.py
+++ b/model_zoo/official/cv/yolov4/test.py
@@ -107,7 +107,7 @@ class DetectionEngine():
 
     def _nms(self, dets, thresh):
         """nms function"""
-        # conver xywh -> xmin ymin xmax ymax
+        # convert xywh -> xmin ymin xmax ymax
        x1 = dets[:, 0]
        y1 = dets[:, 1]
        x2 = x1 + dets[:, 2]
@@ -137,7 +137,7 @@ class DetectionEngine():
 
 
     def _diou_nms(self, dets, thresh=0.5):
-        """conver xywh -> xmin ymin xmax ymax"""
+        """convert xywh -> xmin ymin xmax ymax"""
         x1 = dets[:, 0]
         y1 = dets[:, 1]
         x2 = x1 + dets[:, 2]
@@ -223,7 +223,7 @@ class DetectionEngine():
             x_top_left = x - w / 2.
             y_top_left = y - h / 2.
 
-            # creat all False
+            # create all False
             flag = np.random.random(cls_emb.shape) > sys.maxsize
             for i in range(flag.shape[0]):
                 c = cls_argmax[i]
diff --git a/model_zoo/research/cv/FaceDetection/src/FaceDetection/voc_wrapper.py b/model_zoo/research/cv/FaceDetection/src/FaceDetection/voc_wrapper.py
index 616ef578f9..623033d00b 100644
--- a/model_zoo/research/cv/FaceDetection/src/FaceDetection/voc_wrapper.py
+++ b/model_zoo/research/cv/FaceDetection/src/FaceDetection/voc_wrapper.py
@@ -70,8 +70,8 @@ def nms(boxes, threshold=0.5):
         intersect_area = intersect_w * intersect_h
         ovr = intersect_area / (areas[i] + areas[order[1:]] - intersect_area)
 
-        indexs = np.where(ovr <= threshold)[0]
-        order = order[indexs + 1]
+        indexes = np.where(ovr <= threshold)[0]
+        order = order[indexes + 1]
 
     return reserved_boxes
 
diff --git a/model_zoo/research/cv/FaceDetection/src/data_to_mindrecord_eval.py b/model_zoo/research/cv/FaceDetection/src/data_to_mindrecord_eval.py
index 5a34023bc3..1c5c491663 100644
--- a/model_zoo/research/cv/FaceDetection/src/data_to_mindrecord_eval.py
+++ b/model_zoo/research/cv/FaceDetection/src/data_to_mindrecord_eval.py
@@ -40,7 +40,7 @@ def prepare_file_paths():
     image_names = []
     for dataset_root in dataset_root_list:
         if not os.path.isdir(dataset_root):
-            raise ValueError("dataset root is unvalid!")
+            raise ValueError("dataset root is invalid!")
         anno_dir = os.path.join(dataset_root, "Annotations")
         image_dir = os.path.join(dataset_root, "JPEGImages")
         if is_train:
diff --git a/model_zoo/research/cv/FaceDetection/src/data_to_mindrecord_train.py b/model_zoo/research/cv/FaceDetection/src/data_to_mindrecord_train.py
index eddfa13fa6..7571edde75 100644
--- a/model_zoo/research/cv/FaceDetection/src/data_to_mindrecord_train.py
+++ b/model_zoo/research/cv/FaceDetection/src/data_to_mindrecord_train.py
@@ -38,7 +38,7 @@ def prepare_file_paths():
     anno_files = []
     for dataset_root in dataset_root_list:
         if not os.path.isdir(dataset_root):
-            raise ValueError("dataset root is unvalid!")
+            raise ValueError("dataset root is invalid!")
         anno_dir = os.path.join(dataset_root, "Annotations")
         image_dir = os.path.join(dataset_root, "JPEGImages")
         if is_train:
diff --git a/model_zoo/research/cv/FaceDetection/src/data_to_mindrecord_train_append.py b/model_zoo/research/cv/FaceDetection/src/data_to_mindrecord_train_append.py
index 07cc44580d..6a4142ad74 100644
--- a/model_zoo/research/cv/FaceDetection/src/data_to_mindrecord_train_append.py
+++ b/model_zoo/research/cv/FaceDetection/src/data_to_mindrecord_train_append.py
@@ -39,7 +39,7 @@ def prepare_file_paths():
     anno_files = []
     for dataset_root in dataset_root_list:
         if not os.path.isdir(dataset_root):
-            raise ValueError("dataset root is unvalid!")
+            raise ValueError("dataset root is invalid!")
         anno_dir = os.path.join(dataset_root, "Annotations")
         image_dir = os.path.join(dataset_root, "JPEGImages")
         if is_train:
diff --git a/model_zoo/research/cv/FaceRecognition/eval.py b/model_zoo/research/cv/FaceRecognition/eval.py
index e02561b133..52d3f242f3 100644
--- a/model_zoo/research/cv/FaceRecognition/eval.py
+++ b/model_zoo/research/cv/FaceRecognition/eval.py
@@ -139,7 +139,7 @@ def get_model(args):
         load_param_into_net(net, param_dict_new)
         args.logger.info('INFO, ------------- load model success--------------')
     else:
-        args.logger.info('ERROR, not supprot file:{}, please check weight in config.py'.format(args.weight))
+        args.logger.info('ERROR, not support file:{}, please check weight in config.py'.format(args.weight))
         return 0
     net.set_train(False)
     return net