
Commit e7c1344

[core] Api 2.0/migrate Add operator to new API (#19984)
* Migrate Add operator to new API
* Remove `visit_attributes` as it calls base impl
* Use shape inference to calculate broadcast shape
1 parent aa293c0 commit e7c1344

9 files changed, +107 -106 lines changed


src/core/include/openvino/op/add.hpp

Lines changed: 0 additions & 5 deletions
@@ -38,11 +38,6 @@ class OPENVINO_API Add : public util::BinaryElementwiseArithmetic {
 
     std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const override;
 
-    bool visit_attributes(AttributeVisitor& visitor) override;
-
-    OPENVINO_SUPPRESS_DEPRECATED_START
-    bool evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const override;
-    OPENVINO_SUPPRESS_DEPRECATED_END
     bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override;
     bool has_evaluate() const override;
 };

src/core/reference/include/openvino/reference/add.hpp

Lines changed: 17 additions & 10 deletions
@@ -4,30 +4,37 @@
 
 #pragma once
 
+#include <algorithm>
 #include <cstddef>
 
-#include "ngraph/shape_util.hpp"
 #include "openvino/reference/autobroadcast_binop.hpp"
 
 namespace ov {
 namespace reference {
-template <typename T>
-void add(const T* arg0, const T* arg1, T* out, size_t count) {
-    for (size_t i = 0; i < count; i++) {
-        out[i] = arg0[i] + arg1[i];
-    }
+
+template <class T>
+void add(const T* arg0, const T* arg1, T* out, const size_t count) {
+    std::transform(arg0, std::next(arg0, count), arg1, out, std::plus<T>());
 }
 
-template <typename T>
+/**
+ * @brief Reference implementation of binary elementwise Add operator.
+ *
+ * @param arg0            Pointer to input 0 data.
+ * @param arg1            Pointer to input 1 data.
+ * @param out             Pointer to output data.
+ * @param arg_shape0      Input 0 shape.
+ * @param arg_shape1      Input 1 shape.
+ * @param broadcast_spec  Broadcast specification mode.
+ */
+template <class T>
 void add(const T* arg0,
          const T* arg1,
          T* out,
         const Shape& arg0_shape,
         const Shape& arg1_shape,
         const op::AutoBroadcastSpec& broadcast_spec) {
-    autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, [](T x, T y) -> T {
-        return x + y;
-    });
+    autobroadcast_binop(arg0, arg1, out, arg0_shape, arg1_shape, broadcast_spec, std::plus<T>());
 }
 } // namespace reference
 } // namespace ov
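
For context, a minimal usage sketch of the two reference overloads above. This is illustrative only; the buffers, shapes, and the NUMPY broadcast choice are made-up assumptions and not part of the commit:

#include "openvino/reference/add.hpp"

#include <vector>

int main() {
    const std::vector<float> a{1.f, 2.f, 3.f, 4.f};
    const std::vector<float> b{10.f, 20.f, 30.f, 40.f};
    std::vector<float> out(a.size());

    // Flat overload: element-wise sum over `count` elements.
    ov::reference::add(a.data(), b.data(), out.data(), a.size());

    // Broadcasting overload: {2, 2} + {1, 2} -> {2, 2} under NUMPY rules.
    ov::reference::add(a.data(),
                       b.data(),
                       out.data(),
                       ov::Shape{2, 2},
                       ov::Shape{1, 2},
                       ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY));
    return 0;
}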

src/core/shape_inference/CMakeLists.txt

Lines changed: 2 additions & 1 deletion
@@ -23,7 +23,8 @@ set_target_properties(${TARGET_NAME} PROPERTIES EXPORT_NAME shape_inference)
 
 target_include_directories(${TARGET_NAME} PUBLIC
     $<BUILD_INTERFACE:${SHAPE_INFER_INCLUDE_DIR}>
-    $<BUILD_INTERFACE:${OV_CORE_INCLUDE_PATH}>)
+    $<BUILD_INTERFACE:${OV_CORE_INCLUDE_PATH}>
+    $<BUILD_INTERFACE:$<TARGET_PROPERTY:openvino::core::dev,INTERFACE_INCLUDE_DIRECTORIES>>)
 
 ov_add_clang_format_target(${TARGET_NAME}_clang FOR_TARGETS ${TARGET_NAME})

src/core/shape_inference/include/utils.hpp

Lines changed: 11 additions & 0 deletions
@@ -411,6 +411,17 @@ ov::optional<TResult> get_input_bounds(const ov::Node* op, size_t port, const IT
     }
     return out;
 }
+
+/**
+ * @brief Inference broadcast shape for element wise operator according to broadcast specification stored in operator.
+ *
+ * @param op     Pointer to operator.
+ * @param first  First input shape.
+ * @param second Second input shape.
+ *
+ * @return Result shape from inputs with applied broadcast specification.
+ */
+ov::Shape infer_broadcast_shape(const ov::Node* const op, const ov::Shape& first, const ov::Shape& second);
 } // namespace op
 
 /**
Lines changed: 16 additions & 0 deletions
@@ -0,0 +1,16 @@
+// Copyright (C) 2018-2023 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "utils.hpp"
+
+#include "eltwise_shape_inference.hpp"
+
+namespace ov {
+namespace op {
+
+ov::Shape infer_broadcast_shape(const ov::Node* const op, const ov::Shape& first, const ov::Shape& second) {
+    return eltwise_shape_infer(op, std::vector<ov::PartialShape>{first, second}).front().to_shape();
+}
+} // namespace op
+} // namespace ov
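
A rough sketch of what the new helper computes. The path of this new source file is not shown in this view; the example below is illustrative and assumes the internal shape_inference dev headers are on the include path:

#include "openvino/op/add.hpp"
#include "openvino/op/parameter.hpp"
#include "utils.hpp"  // internal shape_inference header declaring infer_broadcast_shape

int main() {
    // Any node carrying an AutoBroadcastSpec works; Add defaults to NUMPY broadcasting.
    const auto lhs = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{2, 3, 1});
    const auto rhs = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::PartialShape{1, 4});
    const auto add = std::make_shared<ov::op::v1::Add>(lhs, rhs);

    // {2, 3, 1} and {1, 4} fold to {2, 3, 4} under NUMPY rules.
    const ov::Shape out = ov::op::infer_broadcast_shape(add.get(), ov::Shape{2, 3, 1}, ov::Shape{1, 4});
    return out == ov::Shape{2, 3, 4} ? 0 : 1;
}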

src/core/src/op/add.cpp

Lines changed: 52 additions & 80 deletions
@@ -2,111 +2,83 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#include "ngraph/op/add.hpp"
+#include "openvino/op/add.hpp"
 
+#include "element_visitor.hpp"
 #include "itt.hpp"
-#include "ngraph/runtime/host_tensor.hpp"
 #include "openvino/reference/add.hpp"
+#include "utils.hpp"
 
-using namespace std;
-using namespace ngraph;
-
-OPENVINO_SUPPRESS_DEPRECATED_START
+namespace ov {
+namespace op {
 namespace add {
-namespace {
-template <element::Type_t ET>
-bool evaluate(const HostTensorPtr& arg0,
-              const HostTensorPtr& arg1,
-              const HostTensorPtr& out,
-              const op::AutoBroadcastSpec& broadcast_spec) {
-    ov::reference::add(arg0->get_data_ptr<ET>(),
-                       arg1->get_data_ptr<ET>(),
-                       out->get_data_ptr<ET>(),
-                       arg0->get_shape(),
-                       arg1->get_shape(),
-                       broadcast_spec);
-    return true;
-}
+struct Evaluate : element::NoAction<bool> {
+    using ov::element::NoAction<bool>::visit;
 
-bool evaluate_add(const HostTensorPtr& arg0,
-                  const HostTensorPtr& arg1,
-                  const HostTensorPtr& out,
-                  const op::AutoBroadcastSpec& broadcast_spec) {
-    bool rc = true;
-    out->set_broadcast(broadcast_spec, arg0, arg1);
-    switch (arg0->get_element_type()) {
-        NGRAPH_TYPE_CASE(evaluate_add, i8, arg0, arg1, out, broadcast_spec);
-        NGRAPH_TYPE_CASE(evaluate_add, i16, arg0, arg1, out, broadcast_spec);
-        NGRAPH_TYPE_CASE(evaluate_add, i32, arg0, arg1, out, broadcast_spec);
-        NGRAPH_TYPE_CASE(evaluate_add, i64, arg0, arg1, out, broadcast_spec);
-        NGRAPH_TYPE_CASE(evaluate_add, u8, arg0, arg1, out, broadcast_spec);
-        NGRAPH_TYPE_CASE(evaluate_add, u16, arg0, arg1, out, broadcast_spec);
-        NGRAPH_TYPE_CASE(evaluate_add, u32, arg0, arg1, out, broadcast_spec);
-        NGRAPH_TYPE_CASE(evaluate_add, u64, arg0, arg1, out, broadcast_spec);
-        NGRAPH_TYPE_CASE(evaluate_add, bf16, arg0, arg1, out, broadcast_spec);
-        NGRAPH_TYPE_CASE(evaluate_add, f16, arg0, arg1, out, broadcast_spec);
-        NGRAPH_TYPE_CASE(evaluate_add, f32, arg0, arg1, out, broadcast_spec);
-    default:
-        rc = false;
-        break;
+    template <element::Type_t ET>
+    static result_type visit(const Tensor& in0,
+                             const Tensor& in1,
+                             Tensor& out,
+                             const AutoBroadcastSpec& broadcast_spec) {
+        using T = typename element_type_traits<ET>::value_type;
+        reference::add(in0.data<const T>(),
+                       in1.data<const T>(),
+                       out.data<T>(),
+                       in0.get_shape(),
+                       in1.get_shape(),
+                       broadcast_spec);
+        return true;
     }
-    return rc;
-}
-} // namespace
+};
 } // namespace add
 
 // ------------------------------- v1 ------------------------------------------
-
-op::v1::Add::Add(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
+namespace v1 {
+Add::Add(const Output<Node>& arg0, const Output<Node>& arg1, const AutoBroadcastSpec& auto_broadcast)
     : BinaryElementwiseArithmetic(arg0, arg1, auto_broadcast) {
     constructor_validate_and_infer_types();
 }
 
-bool op::v1::Add::visit_attributes(AttributeVisitor& visitor) {
-    OV_OP_SCOPE(v1_Add_visit_attributes);
-    BinaryElementwiseArithmetic::visit_attributes(visitor);
-    return true;
-}
-
-shared_ptr<Node> op::v1::Add::clone_with_new_inputs(const OutputVector& new_args) const {
+std::shared_ptr<Node> Add::clone_with_new_inputs(const OutputVector& new_args) const {
    OV_OP_SCOPE(v1_Add_clone_with_new_inputs);
    check_new_args_count(this, new_args);
-    return make_shared<op::v1::Add>(new_args.at(0), new_args.at(1), this->get_autob());
+    return std::make_shared<op::v1::Add>(new_args.at(0), new_args.at(1), this->get_autob());
 }
 
-bool op::v1::Add::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const {
+bool Add::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
     OV_OP_SCOPE(v1_Add_evaluate);
-    return add::evaluate_add(inputs[0], inputs[1], outputs[0], get_autob());
-}
+    OPENVINO_ASSERT(outputs.size() == 1);
+    OPENVINO_ASSERT(inputs.size() == 2);
 
-bool op::v1::Add::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const {
-    OV_OP_SCOPE(v1_Add_evaluate);
-    if (std::none_of(inputs.cbegin(), inputs.cend(), [](const ov::Tensor& t) {
-            return is_vector(t.get_shape()) && t.get_shape().front() == 0;
-        })) {
-        return BinaryElementwiseArithmetic::evaluate(outputs, inputs);
-    } else {
-        return true;
-    }
+    outputs[0].set_shape(infer_broadcast_shape(this, inputs[0].get_shape(), inputs[1].get_shape()));
+    using namespace ov::element;
+    return IfTypeOf<bf16, f16, f32, i8, i16, i32, i64, u8, u16, u32, u64>::apply<add::Evaluate>(
+        inputs[0].get_element_type(),
+        inputs[0],
+        inputs[1],
+        outputs[0],
+        get_autob());
 }
 
-bool op::v1::Add::has_evaluate() const {
+bool Add::has_evaluate() const {
     OV_OP_SCOPE(v1_Add_has_evaluate);
     switch (get_input_element_type(0)) {
-    case ngraph::element::i8:
-    case ngraph::element::i16:
-    case ngraph::element::i32:
-    case ngraph::element::i64:
-    case ngraph::element::u8:
-    case ngraph::element::u16:
-    case ngraph::element::u32:
-    case ngraph::element::u64:
-    case ngraph::element::bf16:
-    case ngraph::element::f16:
-    case ngraph::element::f32:
+    case element::i8:
+    case element::i16:
+    case element::i32:
+    case element::i64:
+    case element::u8:
+    case element::u16:
+    case element::u32:
+    case element::u64:
+    case element::bf16:
+    case element::f16:
+    case element::f32:
        return true;
    default:
-        break;
+        return false;
    }
-    return false;
 }
+} // namespace v1
+} // namespace op
+} // namespace ov
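
For orientation, a hedged end-to-end sketch of the migrated Tensor-based evaluate path. The buffers and shapes below are made up; the dispatch route through element::IfTypeOf and add::Evaluate is the one shown in the diff above:

#include "openvino/op/add.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/runtime/tensor.hpp"

int main() {
    using namespace ov;

    const auto a = std::make_shared<op::v0::Parameter>(element::i32, Shape{2, 2});
    const auto b = std::make_shared<op::v0::Parameter>(element::i32, Shape{2, 2});
    const auto add = std::make_shared<op::v1::Add>(a, b);

    int32_t lhs[] = {1, 2, 3, 4};
    int32_t rhs[] = {10, 20, 30, 40};

    TensorVector inputs{Tensor(element::i32, Shape{2, 2}, lhs), Tensor(element::i32, Shape{2, 2}, rhs)};
    TensorVector outputs{Tensor(element::i32, Shape{2, 2})};

    // evaluate() now asserts on the in/out counts, sets the broadcast output shape,
    // and dispatches by element type to add::Evaluate::visit<i32>.
    const bool ok = add->evaluate(outputs, inputs);
    return ok && outputs[0].data<int32_t>()[0] == 11 ? 0 : 1;
}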

src/core/src/op/mod.cpp

Lines changed: 2 additions & 2 deletions
@@ -7,7 +7,7 @@
 #include "element_visitor.hpp"
 #include "itt.hpp"
 #include "openvino/reference/mod.hpp"
-#include "shape_util.hpp"
+#include "utils.hpp"
 
 namespace ov {
 namespace op {
@@ -49,7 +49,7 @@ bool Mod::evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) co
     OPENVINO_ASSERT(outputs.size() == 1);
     OPENVINO_ASSERT(inputs.size() == 2);
 
-    outputs[0].set_shape(ov::util::get_broadcast_shape(inputs[0].get_shape(), inputs[1].get_shape(), get_autob()));
+    outputs[0].set_shape(infer_broadcast_shape(this, inputs[0].get_shape(), inputs[1].get_shape()));
     using namespace ov::element;
     return IfTypeOf<i8, i16, i32, i64, u8, u16, u32, u64>::apply<mod::Evaluate>(inputs[0].get_element_type(),
                                                                                 inputs[0],

src/core/src/op/xor.cpp

Lines changed: 6 additions & 7 deletions
@@ -8,7 +8,7 @@
 #include "itt.hpp"
 #include "openvino/op/logical_xor.hpp"
 #include "openvino/reference/xor.hpp"
-#include "shape_util.hpp"
+#include "utils.hpp"
 
 namespace ov {
 namespace op {
@@ -37,18 +37,17 @@ bool input_supported_type(const element::Type& et) {
     return et == element::boolean;
 }
 
-bool evaluate(TensorVector& outputs, const TensorVector& inputs, const AutoBroadcastSpec& broadcast_spec) {
+bool evaluate(const Node* const op, TensorVector& outputs, const TensorVector& inputs) {
     OPENVINO_ASSERT(outputs.size() == 1);
     OPENVINO_ASSERT(inputs.size() == 2);
 
-    outputs[0].set_shape(ov::util::get_broadcast_shape(inputs[0].get_shape(), inputs[1].get_shape(), broadcast_spec));
-
+    outputs[0].set_shape(infer_broadcast_shape(op, inputs[0].get_shape(), inputs[1].get_shape()));
     using namespace ov::element;
     return IfTypeOf<boolean>::apply<logxor::Evaluate>(inputs[0].get_element_type(),
                                                       inputs[0],
                                                       inputs[1],
                                                       outputs[0],
-                                                      broadcast_spec);
+                                                      op->get_autob());
 }
 } // namespace
 } // namespace logxor
@@ -68,7 +67,7 @@ std::shared_ptr<Node> Xor::clone_with_new_inputs(const OutputVector& new_args) c
 bool Xor::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     OV_OP_SCOPE(v0_Xor_evaluate);
 
-    return logxor::evaluate(outputs, inputs, get_autob());
+    return logxor::evaluate(this, outputs, inputs);
 }
 
 bool Xor::has_evaluate() const {
@@ -92,7 +91,7 @@ std::shared_ptr<Node> LogicalXor::clone_with_new_inputs(const OutputVector& new_
 bool LogicalXor::evaluate(TensorVector& outputs, const TensorVector& inputs) const {
     OV_OP_SCOPE(v1_LogicalXor_evaluate);
 
-    return logxor::evaluate(outputs, inputs, get_autob());
+    return logxor::evaluate(this, outputs, inputs);
 }
 
 bool LogicalXor::has_evaluate() const {

src/core/tests/pass/constant_folding.cpp

Lines changed: 1 addition & 1 deletion
@@ -3847,7 +3847,7 @@ class MockAddOp : public ov::op::v1::Add {
               const ov::op::AutoBroadcastSpec& auto_broadcast = ov::op::AutoBroadcastSpec(ov::op::AutoBroadcastType::NUMPY))
         : ov::op::v1::Add(arg0, arg1, auto_broadcast) {
         ON_CALL(*this, evaluate).WillByDefault([this](ov::TensorVector& outputs, const ov::TensorVector& inputs) {
-            return ov::Node::evaluate(outputs, inputs);
+            return ov::op::v1::Add::evaluate(outputs, inputs);
        });
    }
    MOCK_METHOD(bool,
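
The gist of that one-line test change, without the gmock plumbing, is that a derived op's default evaluate now delegates to the migrated ov::op::v1::Add::evaluate rather than ov::Node::evaluate, which routed through the HostTensor overload removed above. A hedged sketch; MyAdd is a hypothetical class, not the test's MockAddOp:

#include "openvino/op/add.hpp"

// Hypothetical helper class for illustration only.
class MyAdd : public ov::op::v1::Add {
public:
    using ov::op::v1::Add::Add;

    bool evaluate(ov::TensorVector& outputs, const ov::TensorVector& inputs) const override {
        // Forward to the Tensor-based implementation introduced in this commit.
        return ov::op::v1::Add::evaluate(outputs, inputs);
    }
};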
