From 8b97a3a44ff930c7f489fe9aa626692eb373bffc Mon Sep 17 00:00:00 2001
From: dzhwinter <dzhwinter@gmail.com>
Date: Wed, 30 Jan 2019 14:43:27 +0800
Subject: [PATCH] rerun ci. test=develop

---
 python/paddle/fluid/framework.py                            | 4 ++--
 python/paddle/fluid/io.py                                   | 2 +-
 python/paddle/fluid/parallel_executor.py                    | 1 +
 .../paddle/fluid/tests/unittests/test_inference_model_io.py | 6 +++---
 .../fluid/transpiler/memory_optimization_transpiler.py      | 4 ++--
 5 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 6f6d94a23d..45f5f6ea87 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1735,7 +1735,7 @@ class Program(object):
         return self.__is_optimized
 
     @_is_optimized.setter
-    def set__is_optimized(self, target):
+    def _is_optimized(self, target):
         self.__is_optimized = target
 
     @property
@@ -1756,7 +1756,7 @@ class Program(object):
         return self._current_role
 
     @op_role.setter
-    def set_op_role(self, role):
+    def op_role(self, role):
         self._current_role = role
 
     @property
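
Note: a Python @prop.setter must be bound to a function with the same
name as the property; the old names (set__is_optimized, set_op_role)
registered the setter under a different attribute and left the property
itself read-only. A minimal standalone sketch of the corrected pattern
(names taken from the hunks above):

    class Program(object):
        def __init__(self):
            self.__is_optimized = False

        @property
        def _is_optimized(self):
            return self.__is_optimized

        # The setter must reuse the property's name. With the old
        # spelling, `prog._is_optimized = True` raised AttributeError,
        # because the getter-only property shadowed the assignment.
        @_is_optimized.setter
        def _is_optimized(self, target):
            self.__is_optimized = target

    prog = Program()
    prog._is_optimized = True  # works only with the matching setter name
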
diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py
index 836b28a561..3ae7fddaac 100644
--- a/python/paddle/fluid/io.py
+++ b/python/paddle/fluid/io.py
@@ -931,7 +931,7 @@ def save_inference_model(dirname,
 
     if main_program is None:
         main_program = default_main_program()
-        if main_program.is_optimized:
+        if main_program._is_optimized:
             warnings.warn(
                 "save_inference_model must put before you call memory_optimize. \
                                             the memory_optimize will modify the original program, \
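
For reference, the call order the new warning enforces. A sketch under
this patch's assumptions (fluid 1.x API; "model_dir" is an illustrative
path):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[2], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    pred = fluid.layers.fc(input=x, size=1)
    avg_cost = fluid.layers.mean(fluid.layers.square_error_cost(pred, y))
    fluid.optimizer.SGD(learning_rate=0.001).minimize(avg_cost)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    program = fluid.default_main_program()

    # Save first, optimize afterwards; the reverse order now only warns,
    # because memory_optimize rewrites the program in place.
    fluid.io.save_inference_model("model_dir", ["x", "y"], [avg_cost],
                                  exe, program)
    fluid.memory_optimize(program)
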
diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py
index a07ff6ac69..c55bc46cc9 100644
--- a/python/paddle/fluid/parallel_executor.py
+++ b/python/paddle/fluid/parallel_executor.py
@@ -135,6 +135,7 @@ class ParallelExecutor(object):
         # step3: init build_strategy
         if build_strategy is None:
             build_strategy = BuildStrategy()
+        build_strategy.enable_inplace = not main._is_optimized
         build_strategy.num_trainers = num_trainers
         build_strategy.trainer_id = trainer_id
         # FIXME(zcd): is_distribution_ is a temporary field, because in pserver mode,
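
On the flag set above: in-place execution reuses variable buffers at run
time, which would stack a second reuse pass on top of the buffer reuse
that memory_optimize already baked into the program, so it is disabled
for optimized programs. A hedged sketch of the same decision made by a
caller (avg_cost as in the sketch above):

    import paddle.fluid as fluid

    build_strategy = fluid.BuildStrategy()
    # Mirror of the line added above: skip the in-place pass once
    # memory_optimize has rewritten the program.
    build_strategy.enable_inplace = not fluid.default_main_program()._is_optimized

    pe = fluid.ParallelExecutor(use_cuda=False,
                                loss_name=avg_cost.name,
                                build_strategy=build_strategy)
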
diff --git a/python/paddle/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/fluid/tests/unittests/test_inference_model_io.py
index 0b1836ce4d..d260afcd62 100644
--- a/python/paddle/fluid/tests/unittests/test_inference_model_io.py
+++ b/python/paddle/fluid/tests/unittests/test_inference_model_io.py
@@ -108,9 +108,9 @@ class TestSaveInferenceModel(unittest.TestCase):
         exe.run(init_program, feed={}, fetch_list=[])
 
         memory_optimize(program, print_log=True)
-        self.assertRaises(RuntimeError,
-                          save_inference_model(MODEL_DIR, ["x", "y"],
-                                               [avg_cost], exe, program))
+        self.assertTrue(program._is_optimized)
+        # will print a warning message
+        save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, program)
 
 
 if __name__ == '__main__':
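
Aside on the removed assertion: assertRaises expects the callable and
its arguments separately, but the old code invoked save_inference_model
eagerly and passed its return value, so the assertion could never catch
anything. Had the exception path been kept, the working form would have
been (a sketch, inside the same TestCase method):

    self.assertRaises(RuntimeError, save_inference_model,
                      MODEL_DIR, ["x", "y"], [avg_cost], exe, program)
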
diff --git a/python/paddle/fluid/transpiler/memory_optimization_transpiler.py b/python/paddle/fluid/transpiler/memory_optimization_transpiler.py
index 2e4dbfcdc9..fc8dafbe97 100755
--- a/python/paddle/fluid/transpiler/memory_optimization_transpiler.py
+++ b/python/paddle/fluid/transpiler/memory_optimization_transpiler.py
@@ -540,7 +540,7 @@ def memory_optimize(input_program,
     if skip_opt_set is not None:
         skip_opt_set = set(map(to_name_str, skip_opt_set))
     cfgs = _get_cfgs(input_program)
-    input_program.is_optimized = True
+    input_program._is_optimized = True
     for cfg in cfgs:
         cfg.memory_optimize(skip_opt_set=skip_opt_set, level=level)
 
@@ -560,6 +560,6 @@ def release_memory(input_program, skip_opt_set=None):
         None
     """
     cfgs = _get_cfgs(input_program)
-    input_program.is_optimized = True
+    input_program._is_optimized = True
     for cfg in cfgs:
         cfg.release_memory(skip_opt_set=skip_opt_set)
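
Both transpiler passes now tag the program they rewrite, which is what
save_inference_model and the ParallelExecutor constructor check above.
A minimal usage sketch (program assumed to be a fluid.Program with a
loss already built, as in the unit test):

    from paddle.fluid.transpiler import memory_optimize

    memory_optimize(program)
    assert program._is_optimized  # set by the pass, read by later stages
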