// lstm_unit_op.cc
#include "lstm_unit_op.h"
namespace caffe2 {
REGISTER_CPU_OPERATOR(LSTMUnit, LSTMUnitOp<CPUContext>);
OPERATOR_SCHEMA(LSTMUnit)
    .NumInputs(4, 5)
    .NumOutputs(2)
    .SetDoc(R"DOC(
LSTMUnit computes the activations of a standard LSTM (without peephole
connections), in a sequence-length aware fashion.

Concretely, given the (fused) inputs X (TxNxD), the previous cell
state (NxD), and the sequence lengths (N), computes the LSTM
activations, avoiding computation if the input is invalid (as in, the
value at X[t][n] where t >= seqLengths[n]).
)DOC")
    .Arg("forget_bias", "Bias term to add in while calculating forget gate")
    .Arg(
        "sequence_lengths",
        "When false, the sequence lengths input is left out, "
        "and all following inputs are shifted left by one.");
REGISTER_CPU_OPERATOR(LSTMUnitGradient, LSTMUnitGradientOp<CPUContext>);
OPERATOR_SCHEMA(LSTMUnitGradient)
    .NumInputs(8, 9)
    .NumOutputs(3)
    .Arg(
        "sequence_lengths",
        "When false, the sequence lengths input is left out, "
        "and all following inputs are shifted left by one.");
class GetLSTMUnitGradient : public GradientMakerBase {
  using GradientMakerBase::GradientMakerBase;
  vector<OperatorDef> GetGradientDefs() override {
    if (GetFlagArgument(def_, "sequence_lengths", true)) {
      // Sequence lengths present: forward all five inputs, both outputs,
      // and both output gradients to the gradient operator (9 inputs).
      return SingleGradientDef(
          "LSTMUnitGradient",
          "",
          vector<string>{
              I(0), I(1), I(2), I(3), I(4), O(0), O(1), GO(0), GO(1)},
          vector<string>{GI(0), GI(1), GI(2)});
    } else {
      // No sequence lengths input: same wiring with one fewer input (8).
      return SingleGradientDef(
          "LSTMUnitGradient",
          "",
          vector<string>{I(0), I(1), I(2), I(3), O(0), O(1), GO(0), GO(1)},
          vector<string>{GI(0), GI(1), GI(2)});
    }
  }
};
REGISTER_GRADIENT(LSTMUnit, GetLSTMUnitGradient);
} // namespace caffe2
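// Example usage (a hedged sketch, not part of this translation unit): running
// the registered operator once from C++. The blob names are arbitrary, and
// the input order (hidden_{t-1}, cell_{t-1}, fused gates, sequence lengths,
// timestep) is an assumption inferred from the gradient wiring above; see
// lstm_unit_op.h for the authoritative input enum.
//
//   caffe2::Workspace ws;
//   // ... create and fill the five input blobs in `ws` here ...
//   caffe2::OperatorDef def;
//   def.set_type("LSTMUnit");
//   for (const char* name :
//        {"hidden_prev", "cell_prev", "gates", "seq_lengths", "timestep"}) {
//     def.add_input(name);
//   }
//   def.add_output("hidden");
//   def.add_output("cell");
//   auto* arg = def.add_arg(); // optional extra forget-gate bias
//   arg->set_name("forget_bias");
//   arg->set_f(1.0f);
//   ws.RunOperatorOnce(def);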