@@ -21,8 +21,7 @@ class Layer(object):
    # ...
    return self.apply(inputs)

  def apply(inputs):

  def forward(inputs):
    # forward logic with paddle operators. backward auto-generated.

@@ -36,6 +35,7 @@ class PyLayer(core.PyLayer):
  @staticmethod
  def forward(inputs):
    # any forward logic implemented with numpy io.

  @staticmethod
  def backward(inputs):
    # any backward logic implemented with numpy io.
```
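
To make the `PyLayer` contract above concrete, here is a minimal sketch of a user-defined layer whose forward and backward passes are written with numpy. The class name `MyRelu`, the `fluid.imperative.PyLayer` base path, and the `backward` signature are illustrative assumptions, not fixed parts of the design.

```python
import numpy as np
import paddle.fluid as fluid


class MyRelu(fluid.imperative.PyLayer):
    """Hypothetical user layer: both passes are plain numpy, traced by the framework."""

    @staticmethod
    def forward(inputs):
        # Forward logic implemented with numpy io: a plain ReLU.
        return np.maximum(inputs, 0.0)

    @staticmethod
    def backward(inputs, douts):
        # Backward logic implemented with numpy io; the (inputs, douts)
        # signature is an assumption made for this sketch.
        return douts * (inputs > 0.0).astype(douts.dtype)
```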
@@ -67,7 +67,6 @@ class Tracer {
There is already a lot of existing research in this area; see, for example:

https://autodiff-workshop.github.io/
## Tests
* All op tests run once in static graph mode and once in imperative mode, as sketched below.
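One way to read this requirement is that each op test is parameterized over the execution mode and asserts the same numerical result in both. The sketch below is illustrative only: `run_elementwise_add` is a hypothetical stand-in for whatever harness actually builds the op as a static-graph program or calls it eagerly in imperative mode.

```python
import unittest

import numpy as np


def run_elementwise_add(x, y, imperative):
    # Hypothetical helper: in a real op test this would either build and run a
    # static-graph program or execute the op eagerly, depending on `imperative`.
    # Here both paths are faked so the sketch stays self-contained.
    return x + y


class TestElementwiseAddBothModes(unittest.TestCase):
    def _check(self, imperative):
        x = np.random.rand(2, 3).astype("float32")
        y = np.random.rand(2, 3).astype("float32")
        out = run_elementwise_add(x, y, imperative)
        np.testing.assert_allclose(out, x + y, rtol=1e-5)

    def test_static_graph(self):
        self._check(imperative=False)

    def test_imperative(self):
        self._check(imperative=True)


if __name__ == "__main__":
    unittest.main()
```
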
@@ -131,6 +130,7 @@ class MLP(fluid.imperative.Layer):
out._backward()
```
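
For context, here is a hedged sketch of how an imperative `MLP` of this shape might be defined and run end to end. The `FC` layer and its import path, `fluid.imperative.guard()`, and `to_variable` are assumptions about the surrounding API; only `fluid.imperative.Layer` and `out._backward()` come from the snippet above.

```python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.imperative.nn import FC  # assumed import path


class MLP(fluid.imperative.Layer):
    def __init__(self):
        super(MLP, self).__init__()
        # Two fully-connected layers; the sizes are arbitrary for this sketch.
        self._fc1 = FC(3)
        self._fc2 = FC(4)

    def forward(self, inputs):
        x = self._fc1(inputs)
        x = self._fc2(x)
        return fluid.layers.reduce_sum(x)


np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
with fluid.imperative.guard():  # assumed eager-execution scope
    var_inp = fluid.imperative.base.to_variable(np_inp)  # assumed conversion helper
    mlp = MLP()
    out = mlp(var_inp)
    out._backward()  # compute gradients along the recorded trace
```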
# Plan
2.1: 3 full-time engineers. Can run a few simple models. (Currently: 2 engineers at 20% time.)
@@ -143,6 +143,7 @@ class MLP(fluid.imperative.Layer):
12.1: 5 full-time engineers. Can compile to a static graph; support more optimizations.
# Discussion
TODO.