copy_rows_to_tensor_op.h
// Copyright 2004-present Facebook. All Rights Reserved.

#pragma once

#include <unordered_map>
#include <unordered_set>

#include "caffe2/core/context.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/eigen_utils.h"

namespace caffe2 {

// Copies a single 1-d ROW into the rows of a 2-d INPUT_TENSOR selected by
// INDICES, in place (Output(0) must alias Input(0)). For example, with an
// INPUT_TENSOR of shape (4, 3), INDICES = [0, 3], and ROW = [1, 2, 3],
// rows 0 and 3 of the tensor each become [1, 2, 3].
template <class Context>
class CopyRowsToTensorOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  CopyRowsToTensorOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator<Context>(operator_def, ws) {}

  bool RunOnDevice() override {
    return DispatchHelper<
        TensorTypes<at::Half, float, double, int32_t, int64_t>>::
        call(this, Input(INPUT_TENSOR));
  }

  template <typename T>
  bool DoRunWithType() {
    auto& input_tensor = Input(INPUT_TENSOR);
    auto& indices = Input(INDICES);
    auto& row = Input(ROW);
    auto tensor_width = input_tensor.size(1);
    CAFFE_ENFORCE_EQ(input_tensor.dim(), 2, "INPUT_TENSOR should be 2-d");
    CAFFE_ENFORCE_EQ(indices.dim(), 1, "INDICES should be 1-d");
    CAFFE_ENFORCE_EQ(row.dim(), 1, "ROW should be 1-d");
    CAFFE_ENFORCE_EQ(
        tensor_width,
        row.size(0),
        "width of input tensor should match length of row");
    const auto* indices_data = indices.template data<int64_t>();
    const auto* row_data = row.template data<T>();
    auto* output = Output(0);
    auto* output_data = output->template mutable_data<T>();
    CAFFE_ENFORCE(
        IsInputOutputAlias(0, 0), "Input 0 and Output 0 should be alias.");
    // Overwrite each selected row of the output with a copy of ROW.
    // NOLINTNEXTLINE(clang-diagnostic-sign-compare)
    for (size_t i = 0; i < indices.sizes()[0]; ++i) {
      std::memcpy(
          output_data + indices_data[i] * tensor_width,
          row_data,
          tensor_width * sizeof(T));
    }
    return true;
  }

 protected:
  INPUT_TAGS(INPUT_TENSOR, INDICES, ROW);
};
// Pass-through gradient for CopyRowsToTensor: copies the incoming gradient
// into the output blob unchanged.
template <class Context>
class CopyRowsToTensorGradientOp : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  CopyRowsToTensorGradientOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator<Context>(operator_def, ws) {}

  bool RunOnDevice() override {
    return DispatchHelper<
        TensorTypes<at::Half, float, double, int32_t, int64_t>>::
        call(this, Input(0));
  }

  template <typename T>
  bool DoRunWithType() {
    auto* output = Output(0);
    output->ResizeLike(Input(0));
    auto* output_data = output->template mutable_data<T>();
    auto& input = Input(0);
    const auto* input_data = input.template data<T>();
    // Copies input.size(0) elements (the extent of the first dimension)
    // of the incoming gradient into the output.
    std::memcpy(output_data, input_data, input.size(0) * sizeof(T));
    return true;
  }
};

} // namespace caffe2
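
// Usage sketch (illustrative only). This header only declares the operator
// templates; the sketch assumes the accompanying .cc file registers the
// operator under the name "CopyRowsToTensor" and allows output 0 to run
// in place with input 0, and that caffe2/core/workspace.h and
// caffe2/core/blob.h are included. Blob names and shapes are made up.
//
//   caffe2::Workspace ws;
//
//   // 2-d tensor whose rows will be overwritten in place.
//   auto* x = BlobGetMutableTensor(ws.CreateBlob("X"), caffe2::CPU);
//   x->Resize(4, 3);
//   std::fill_n(x->mutable_data<float>(), 12, 0.f);
//
//   // Row indices to overwrite (1-d, int64_t).
//   auto* idx = BlobGetMutableTensor(ws.CreateBlob("indices"), caffe2::CPU);
//   idx->Resize(2);
//   idx->mutable_data<int64_t>()[0] = 0;
//   idx->mutable_data<int64_t>()[1] = 3;
//
//   // The row (width 3) copied into every selected index.
//   auto* row = BlobGetMutableTensor(ws.CreateBlob("row"), caffe2::CPU);
//   row->Resize(3);
//   std::fill_n(row->mutable_data<float>(), 3, 1.f);
//
//   caffe2::OperatorDef def;
//   def.set_type("CopyRowsToTensor");
//   def.add_input("X");
//   def.add_input("indices");
//   def.add_input("row");
//   def.add_output("X");  // must alias input 0; the op enforces this
//
//   auto op = caffe2::CreateOperator(def, &ws);
//   op->Run();  // rows 0 and 3 of X are now [1, 1, 1]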