commit d1d21004dd

@@ -0,0 +1,58 @@
## Evaluator Design
### The Problem
During training or serving, we provide an evaluation function to measure the model's performance, e.g., accuracy or precision. In an operator-based framework design, the data goes through the network pipeline batch by batch, so inside an operator we can only compute the metrics of a single mini-batch. We therefore need a mechanism to calculate the metrics over every N passes/batches the user wants.
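
For example, accumulating raw counts rather than per-batch metric values matters when batch sizes differ; a minimal numpy sketch (with made-up data) of the accumulation this design calls for:

```python
import numpy as np

# Hypothetical (prediction, label) pairs for two mini-batches of
# different sizes.
batches = [(np.array([1, 1, 0]), np.array([1, 0, 0])),
           (np.array([1]), np.array([1]))]

correct, total = 0, 0
for pred, label in batches:
    correct += int((pred == label).sum())  # per-batch statistic
    total += len(label)

# Accumulated counts give 3/4 = 0.75; averaging the per-batch
# accuracies (2/3 and 1.0) would wrongly give ~0.83.
print(correct / float(total))
```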
### Evaluator Design
Currently, every operation is expressed in the graph, so we divide the evaluation process into three steps.
1. Initialize the metric state and add it into the block.
2. Calculate the statistics of the metric state for every mini-batch. A single operator is only responsible for computing the statistics of one mini-batch; for example, one run of the accuracy operator computes the accuracy of just one mini-batch of data.
3. Merge the mini-batch statistics to form the evaluation result for multiple mini-batches. When it comes to distributed training/multi-GPU training, aggregate the values from the different devices, as in the sketch below.
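
A minimal sketch of the merge step, assuming each device exposes its accumulated `Correct`/`Total` counts as numpy arrays (the numbers are made up):

```python
import numpy as np

# Hypothetical accumulated (Correct, Total) states from two devices.
device_states = [(np.array([90], dtype="int64"), np.array([128], dtype="int64")),
                 (np.array([100], dtype="int64"), np.array([128], dtype="int64"))]

correct = sum(c for c, _ in device_states)
total = sum(t for _, t in device_states)

# Merge first, divide once: 190 / 256 = 0.7421875.
print(correct.astype("float32") / total)
```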
### Implementation
This design is shown in the Python API. Each metric operator needs to calculate its metric statistics and return batch-aware states; the Python side is responsible for accumulating those states over each pass.

```python
class Evaluator(object):
    """
    Evaluator base class.
    """

    def __init__(self, name, **kwargs):
        """
        Different evaluators may have different metric states. E.g., Accuracy
        needs two variables, the total and correct sample counts. Auc needs
        four variables: `true_positives`, `true_negatives`, `false_positives`
        and `false_negatives`. So every evaluator should create the variables
        it needs and append them to main_program.

        The initialization of Evaluator should be responsible for:
        creating the metric states and appending them to the main_program.
        """
        pass

    def _update_ops(self, input, label, **kwargs):
        """
        Add mini-batch evaluator calculate operators to the main_program.
        Add increment operators to accumulate the metric states.
        """

    def reset(self, executor, reset_program=None):
        """
        Reset metric states at the beginning of each pass or at a
        user-specified batch number.
        Execute the reset_program to reset the states.
        """

    def eval(self, executor, eval_program=None):
        """
        Merge the mini-batch statistics to form the evaluation result for
        multiple mini-batches.
        Execute the eval_program and return the result.
        """
        return eval_result
```
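
A hypothetical sketch of how a training loop would drive these three methods; `SomeEvaluator`, `exe`, `reader`, `feeder`, `predictions`, `labels` and `num_passes` are placeholders, not part of this design:

```python
# Hypothetical usage; all names below are assumed to be defined elsewhere.
evaluator = SomeEvaluator("metric")              # step 1: create metric states
batch_metric = evaluator._update_ops(input=predictions, label=labels)

for pass_id in range(num_passes):
    evaluator.reset(exe)                         # zero the states for this pass
    for data in reader():                        # step 2: per-batch statistics
        exe.run(feed=feeder.feed(data), fetch_list=[batch_metric])
    pass_metric = evaluator.eval(exe)            # step 3: merge into one result
```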

@@ -1,59 +1,187 @@

import paddle.v2.fluid.op as op
import numpy as np
from paddle.v2.fluid.framework import Program, g_main_program, unique_name, Variable
import paddle.v2.fluid.core as core

def avg_accumulate(accumulated_var, per_eval, num_batches, place):
    # Incremental mean update: avg += (x - avg) / n, where n counts the
    # batches seen so far (including the current one).
    t = np.array(accumulated_var.get_tensor())
    t[0] += (per_eval[0] - t[0]) / float(num_batches)
    accumulated_var.get_tensor().set([t[0]], place)


def _clone_var_in_block_(block, var):
    # Clone `var` into `block`, keeping its metadata and marking it
    # persistable so the state survives across executor runs.
    assert isinstance(var, Variable)
    return block.create_var(
        name=var.name,
        shape=var.shape,
        dtype=var.data_type,
        type=var.type,
        lod_level=var.lod_level,
        persistable=True)


class Evaluator(object):
    def __init__(self,
                 scope,
                 operator='accuracy',
                 input='Inference',
                 label='Label',
                 output='Output',
                 place=core.CPUPlace()):
        """
        Create an evaluator for evaluating the inference.
        NOTE: It runs on CPUPlace() by default; running on GPUPlace does not
        improve performance much.

        :param scope: the scope instance that contains the input.
        :type scope: paddle.v2.fluid.core.scope
        :param operator: operator name for calculating the evaluation of each mini-batch.
        :type operator: string
        :param input: output variable name of the forward network.
        :type input: string
        :param label: variable name of the label.
        :type label: string
        """
        self.scope = scope
        self.place = place
        self.output_name = output
        self.num_batches = 0
        # Create a variable to store the accumulated evaluator output.
        eval_name = ''.join([operator, "@Eval"])
        if scope.find_var(eval_name):
            raise Exception("evaluator already exists in scope: %s" % eval_name)
        self.accumulated_var = scope.var(eval_name)
        t = self.accumulated_var.get_tensor()
        t.set_dims((1, ))
        t.set([0.0], place)
        # Create the operator that evaluates one mini-batch.
        var_map = dict()  # var name -> variable
        var_map[input] = [input]
        var_map[label] = [label]
        var_map[output] = [output]
        self.op = op.Operator(operator, **var_map)

    def evaluate(self, ctx, accumulator=avg_accumulate):
        # Run the per-batch metric operator, then fold its output into
        # the accumulated state.
        self.op.run(self.scope, ctx)
        per_eval = np.array(self.scope.find_var(self.output_name).get_tensor())
        self.num_batches += 1
        accumulator(self.accumulated_var, per_eval, self.num_batches,
                    self.place)

    """
    Evaluator base class.

    create metric states
    add mini-batch evaluator calculate operators
    add increment operators to accumulate the metric states
    """

    def __init__(self, name, **kwargs):
        """
        Initialize the global states.
        """
        self._states = {}
        if "main_program" in kwargs:
            self._main_program = kwargs.get("main_program")
        else:
            self._main_program = g_main_program

    def _update_ops(self, *args, **kwargs):
        """
        Append update ops to the global states.
        """
        raise NotImplementedError()

    def reset(self, executor, reset_program=None):
        """
        Clear metric states at the beginning of each pass or at a
        user-specified batch number.
        """
        if reset_program is None:
            reset_program = Program()
        block = reset_program.global_block()
        for k, var in self._states.iteritems():
            g_var = _clone_var_in_block_(block, var)
            zeros = block.create_var(dtype="float32", persistable=True)
            block.append_op(
                type="fill_constant",
                outputs={"Out": [zeros]},
                attrs={
                    "shape": g_var.shape,
                    "value": 0.0,
                    "data_type": 5,  # float32
                })
            block.append_op(
                type="scale", inputs={"X": zeros}, outputs={"Out": g_var})
        executor.run(reset_program, fetch_list=self._states.values())

    def eval(self, executor, eval_program=None):
        """
        Merge the mini-batch statistics to form the evaluation result for
        multiple mini-batches.
        """
        raise NotImplementedError()


class Accuracy(Evaluator):
    """
    Accuracy needs two state variables: Total and Correct.
    """

    def __init__(self, *args, **kwargs):
        super(Accuracy, self).__init__("accuracy", **kwargs)
        block = self._main_program.global_block()
        # Persistable counters that accumulate across mini-batches.
        g_total = block.create_var(
            name=unique_name("Total"),
            persistable=True,
            dtype="int64",
            shape=[1])
        g_correct = block.create_var(
            name=unique_name("Correct"),
            persistable=True,
            dtype="int64",
            shape=[1])
        self._states["Total"] = g_total
        self._states["Correct"] = g_correct

    def _update_ops(self, input, label, k=1, **kwargs):
        block = self._main_program.global_block()
        # Step 2: compute the top-k predictions and this batch's accuracy.
        topk_out = block.create_var(dtype=input.data_type)
        topk_indices = block.create_var(dtype="int64")
        block.append_op(
            type="top_k",
            inputs={"X": [input]},
            outputs={"Out": [topk_out],
                     "Indices": [topk_indices]},
            attrs={"k": k})
        acc_out = block.create_var(dtype=kwargs.get("out_dtype", "float32"))
        correct = block.create_var(dtype="int64", persistable=True)
        total = block.create_var(dtype="int64", persistable=True)
        block.append_op(
            type="accuracy",
            inputs={
                "Out": [topk_out],
                "Indices": [topk_indices],
                "Label": [label]
            },
            outputs={
                "Accuracy": [acc_out],
                "Correct": [correct],
                "Total": [total],
            })

        block.append_op(
            type="cast",
            inputs={"X": [self._states["Total"]]},
            outputs={"Out": [self._states["Total"]]},
            attrs={
                "in_data_type": 5,  # float32
                "out_data_type": 2,  # int32
            })
        block.append_op(
            type="cast",
            inputs={"X": [self._states["Correct"]]},
            outputs={"Out": [self._states["Correct"]]},
            attrs={
                "in_data_type": 5,  # float32
                "out_data_type": 2,  # int32
            })

        # Accumulate this batch's counts into the global states.
        block.append_op(
            type="elementwise_add",
            inputs={"X": [self._states["Total"]],
                    "Y": [total]},
            outputs={"Out": [self._states["Total"]]})
        block.append_op(
            type="elementwise_add",
            inputs={"X": [self._states["Correct"]],
                    "Y": [correct]},
            outputs={"Out": [self._states["Correct"]]})

        return acc_out

    def eval(self, executor, eval_program=None):
        if eval_program is None:
            eval_program = Program()
        block = eval_program.global_block()
        eval_out = block.create_var(dtype=self._states["Total"].data_type)
        e_total = _clone_var_in_block_(block, self._states["Total"])
        e_correct = _clone_var_in_block_(block, self._states["Correct"])
        block.append_op(
            type="cast",
            inputs={"X": [e_total]},
            outputs={"Out": [e_total]},
            attrs={
                "in_data_type": 2,  # int32
                "out_data_type": 5,  # float32
            })
        block.append_op(
            type="cast",
            inputs={"X": [e_correct]},
            outputs={"Out": [e_correct]},
            attrs={
                "in_data_type": 2,  # int32
                "out_data_type": 5,  # float32
            })
        # Step 3: final metric = Correct / Total over all mini-batches.
        block.append_op(
            type="elementwise_div",
            inputs={"X": e_correct,
                    "Y": e_total},
            outputs={"Out": eval_out})
        out = executor.run(eval_program, fetch_list=[eval_out])
        return np.array(out[0])


def accuracy(*args, **kwargs):
    # Convenience wrapper: build an Accuracy evaluator and append its
    # per-batch update ops in one call.
    cls = Accuracy(*args, **kwargs)
    out = cls._update_ops(*args, **kwargs)
    return cls, out
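
# A hypothetical usage sketch of the wrapper above; `exe` (an Executor),
# `predict`, `label`, `feeder`, `reader` and `num_passes` are assumed to
# be created elsewhere:
#
#     acc, batch_acc = accuracy(input=predict, label=label)
#     for pass_id in range(num_passes):
#         acc.reset(exe)
#         for data in reader():
#             exe.run(g_main_program, feed=feeder.feed(data),
#                     fetch_list=[batch_acc])
#         pass_acc = acc.eval(exe)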

@@ -1,64 +0,0 @@

from paddle.v2.fluid.evaluator import Evaluator
from paddle.v2.fluid.op import Operator
import paddle.v2.fluid.core as core
import unittest
import op_test
import numpy as np


class TestEvaluator(unittest.TestCase):
    def setup(self, scope, inputs, outputs):
        def __create_var__(var_name, arr):
            # Only create the variable; op_test.set_input fills the
            # tensors later.
            scope.var(var_name)

        for var_name, arr in inputs.iteritems():
            __create_var__(var_name, arr)

        for var_name, arr in outputs.iteritems():
            __create_var__(var_name, arr)

    def test_evaluator(self):
        inputs = {
            'Inference': np.array([[1, 1, 1, 1, 1, 0, 0, 0, 0, 1]]).T,
            'Label': np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
        }
        outputs = {'Accuracy': np.array([0.9])}
        out_name = 'Accuracy'

        places = [core.CPUPlace()]
        if core.is_compile_gpu():
            places.append(core.GPUPlace(0))

        for place in places:
            scope = core.Scope()
            self.setup(scope, inputs, outputs)

            evaluator = Evaluator(
                scope,
                operator='accuracy',
                input='Inference',
                label='Label',
                output=out_name,
                place=place)
            op_test.set_input(scope, evaluator.op, inputs, place)
            ctx = core.DeviceContext.create(place)

            for i in range(10):  # simulate 10 mini-batches
                evaluator.evaluate(ctx)

            actual = np.array(scope.find_var(out_name).get_tensor())
            print actual

            self.assertTrue(
                np.allclose(
                    actual, outputs[out_name], atol=1e-5),
                "output name: " + out_name + " has diff.")


if __name__ == '__main__':
    exit(0)  # NOTE: exits before running, so this test is currently disabled.
    unittest.main()

@@ -0,0 +1,3 @@

import paddle.v2.framework.core as core
from paddle.v2.framework.framework import OpProtoHolder, Variable, Program, \
    Operator