#ifndef CAFFE2_OPERATORS_RESHAPE_OP_H_
#define CAFFE2_OPERATORS_RESHAPE_OP_H_
#include "caffe2/core/common_omp.h"
#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"
#include "c10/util/irange.h"
namespace caffe2 {
// Takes a data tensor and a target shape and reshapes the data accordingly.
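// The target shape may be given either through the repeated `shape` argument
// or through an optional second input tensor. A dimension of -1 is inferred
// from the remaining dimensions, and a dimension of 0 copies the
// corresponding input dimension (legacy caffe1 behavior). The second output
// receives the original shape of the input.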
template <typename F, class Context>
class ReshapeOp : public Operator<Context> {
public:
USE_OPERATOR_CONTEXT_FUNCTIONS;
template <class... Args>
explicit ReshapeOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...),
new_shape_(this->template GetRepeatedArgument<int64_t>("shape")) {}
bool RunOnDevice() override {
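    // When a second input is present it carries the target shape, so dispatch
    // on its element type (int or int64_t); otherwise the shape must come
    // from the `shape` argument.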
if (InputSize() == 2) {
return DispatchHelper<TensorTypes<int, int64_t>>::call(this, Input(1));
}
CAFFE_ENFORCE(
OperatorBase::HasArgument("shape"), "Argument `shape` is missing.");
return this->template DoRunWithType<int64_t>();
}
template <typename T>
bool DoRunWithType() {
DoRunWithTypeImpl<T>(Input(0), Output(0));
return true;
}
protected:
template <typename T>
void DoRunWithTypeImpl(const Tensor& input, Tensor* output) {
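    // Start from the `shape` argument; when the shape is given as the second
    // input instead, the argument must be absent and the input's contents are
    // appended below.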
vector<int64_t> actual_new_shape = new_shape_;
if (InputSize() == 2) {
CAFFE_ENFORCE(
!OperatorBase::HasArgument("shape"),
"New shape is specified by the input blob, do not pass in "
"the argument `shape`.");
      // The shape input is expected to live on the CPU, but handle the case
      // where it ends up on another device (e.g. the GPU) as well.
      if (this->InputIsTensorType(1, CPU)) {
        // Common case: the shape input is already in a CPU context.
auto& shape = this->template Input<Tensor>(1, CPU);
CAFFE_ENFORCE_EQ(
shape.dim(),
1,
"When input_as_shape is true, the input must be a 1D tensor of "
"data type int64_t");
CAFFE_ENFORCE(shape.numel() > 0);
auto* shape_data = shape.template data<T>();
actual_new_shape.insert(
actual_new_shape.end(), shape_data, shape_data + shape.dim32(0));
} else {
auto& shape = Input(1);
CAFFE_ENFORCE_EQ(
shape.dim(),
1,
"When input_as_shape is true, the input must be a 1D tensor of "
"data type int64_t");
CAFFE_ENFORCE(shape.numel() > 0);
auto* shape_data = shape.template data<T>();
        // Fetch a copy of the shape data from the device to the CPU before
        // reading it.
std::unique_ptr<T[]> shape_data_copy =
std::make_unique<T[]>(shape.dim32(0));
context_.template CopyToCPU<T>(
shape.dim32(0), shape_data, shape_data_copy.get());
actual_new_shape.insert(
actual_new_shape.end(),
shape_data_copy.get(),
shape_data_copy.get() + shape.dim32(0));
}
}
// Checks if the new shape is valid and fills in the missing dimension
// specified by -1.
// NOTE: At most one dimension can be -1.
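    // `total_size` is the element count of the input, while `size` will
    // accumulate the product of the explicitly specified new dimensions.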
auto total_size = input.numel();
T size = 1;
    // NOTE: support for the legacy caffe1 syntax.
    // Dimensions specified as zero copy over the corresponding input
    // dimension.
if (total_size != 0) {
// NOLINTNEXTLINE(clang-diagnostic-sign-compare)
for (size_t i = 0; i < actual_new_shape.size() && i < input.dim(); ++i) {
if (actual_new_shape[i] == 0) {
actual_new_shape[i] = input.size(i);
}
}
}
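    // Locate the (at most one) dimension given as -1, accumulating the
    // product of all other dimensions into `size`.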
int unknown_idx = -1;
for (const auto i : c10::irange(actual_new_shape.size())) {
const auto dim = actual_new_shape[i];
if (dim == -1) {
CAFFE_ENFORCE(
unknown_idx == -1,
"Argument `shape` has more than one missing dimension.");
unknown_idx = i;
} else {
size *= dim;
}
}
if (size == 0 && total_size != 0) {
CAFFE_THROW(
"Can not reshape a non-zero size (",
total_size,
") tensor to zero size.");
}
if (total_size != 0) {
      // If the tensor is not empty, infer the size of the unknown dimension.
if (unknown_idx != -1) {
CAFFE_ENFORCE_NE(
size,
0,
"New shape at dim ",
unknown_idx,
" can not be inferred since new size is zero.");
CAFFE_ENFORCE(
total_size % size == 0,
"Argument `shape` does not agree with the input data.",
" (",
total_size,
" vs ",
size,
")");
actual_new_shape[unknown_idx] = total_size / size;
} else {
CAFFE_ENFORCE_EQ(
total_size,
size,
"Argument `shape` does not agree with the input data.",
" (",
total_size,
" != ",
size,
")");
}
} else if (unknown_idx != -1) {
      // If the input is empty, set the unknown dimension to 0 (an empty
      // tensor).
actual_new_shape[unknown_idx] = 0;
}
// Write the original shape to the second output.
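    // Note that the old shape is always produced on the CPU, regardless of
    // the operator's context.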
auto* old_shape = this->template Output<Tensor>(1, CPU);
old_shape->Resize(input.sizes().size());
T* old_shape_data = old_shape->template mutable_data<T>();
std::vector<T> old_shape_vector(input.sizes().begin(), input.sizes().end());
for (const auto i : c10::irange(old_shape_vector.size())) {
old_shape_data[i] = old_shape_vector[i];
}
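    // Apply the new shape. When running in place (the output aliases the
    // input) the element count is unchanged, so no data copy is needed.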
output->Resize(actual_new_shape);
if (output != &input) {
// If we are not doing in-place computation, a copy is needed.
context_.CopyItemsSameDevice(
input.dtype(),
input.numel(),
input.raw_data(),
output->raw_mutable_data(input.dtype()));
}
}
private:
vector<int64_t> new_shape_;
};
} // namespace caffe2
#endif // CAFFE2_OPERATORS_RESHAPE_OP_H_