diff --git a/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/sequence/one-hot-1.rst b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/sequence/one-hot-1.rst index 3a705ef6eb8ebe..4c68b0b2c78cb8 100644 --- a/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/sequence/one-hot-1.rst +++ b/docs/articles_en/documentation/openvino-ir-format/operation-sets/operation-specs/sequence/one-hot-1.rst @@ -37,9 +37,24 @@ The types of input scalars ``on_value`` and ``off_value`` should match and be eq * **Type**: ``int`` * **Required**: *yes* +* *negative_indices_mode* + + * **Description**: controls how negative indices in the ``indices`` tensor are handled. + * **Range of values**: + + * ``ignore_negative``: negative indices are ignored, and the corresponding output rows are filled with ``off_value``. + * ``normalize``: negative indices in the range ``[-depth, -1]`` are normalized by ``depth + index``, which effectively maps the range ``[-depth, depth-1]`` to ``[0, depth-1]``. + + * **Type**: ``string`` + * **Default value**: ``ignore_negative`` + * **Required**: *no* + +.. note:: + Behavior before the 2025.4 OpenVINO release: ``negative_indices_mode`` was not supported and negative indices were handled according to the ``ignore_negative`` mode. + **Inputs**: -* **1**: ``indices``: input tensor of type *T1* with non-negative indices, behavior for negative indices is undefined. Can be 0D. **Required.** +* **1**: ``indices``: input tensor of type *T1* with indices. Can be 0D. **Required.** * **2**: ``depth``: positive scalar (0D tensor) of type *T1* that specifies the number of classes and thus the size of the one-hot dimension. **Required.** * **3**: ``on_value``: scalar (0D tensor) of type *T2* that fills the locations in output tensor specified in ``indices``. **Required.** * **4**: ``off_value``: scalar (0D tensor) of type *T2* that fills the locations not represented in ``indices``. 
**Required.** @@ -60,9 +75,9 @@ The types of input scalars ``on_value`` and ``off_value`` should match and be eq :force: - + - + 4 diff --git a/src/core/include/openvino/op/one_hot.hpp b/src/core/include/openvino/op/one_hot.hpp index 058066c390b660..56e4cf32edf0cd 100644 --- a/src/core/include/openvino/op/one_hot.hpp +++ b/src/core/include/openvino/op/one_hot.hpp @@ -4,7 +4,7 @@ #pragma once -#include "openvino/op/op.hpp" +#include "openvino/op/util/one_hot_base.hpp" namespace ov { namespace op { @@ -12,7 +12,7 @@ namespace v1 { /// \brief OneHot operation. /// /// \ingroup ov_ops_cpp_api -class OPENVINO_API OneHot : public Op { +class OPENVINO_API OneHot : public util::OneHotBase { public: OPENVINO_OP("OneHot", "opset1", op::Op); @@ -40,19 +40,73 @@ class OPENVINO_API OneHot : public Op { bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; bool has_evaluate() const override; +}; +} // namespace v1 +namespace v16 { +/// \brief OneHot operation. +/// +/// \ingroup ov_ops_cpp_api +class OPENVINO_API OneHot : public util::OneHotBase { +public: + OPENVINO_OP("OneHot", "opset16", op::Op); + + /// \brief Lists the supported negative indices modes for this version of the operator. + /// See the specification for the description of how negative indices are handled. + enum class NegativeIndicesMode { IGNORE_NEGATIVE, NORMALIZE }; + + /// \brief Constructs a one-hot operation. + OneHot() = default; + /// \brief Constructs a one-hot operation. + /// + /// \param indices Input tensor containing indices. + /// \param depth Specifies number of classes and the size of one-hot dimension. + /// \param on_value Specifies value that the locations in output tensor represented + /// by indices in input take. + /// \param off_value Specifies value that the locations in output tensor not + /// represented + /// by indices in input take. + /// \param axis Axis along which one-hot representation in added. 
+ OneHot(const Output& indices, + const Output& depth, + const Output& on_value, + const Output& off_value, + int64_t axis, + NegativeIndicesMode mode = NegativeIndicesMode::IGNORE_NEGATIVE); + + bool visit_attributes(AttributeVisitor& visitor) override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + void validate_and_infer_types() override; + + bool evaluate(TensorVector& outputs, const TensorVector& inputs) const override; + bool has_evaluate() const override; - /// \return The index of the one-hot axis. - const int64_t& get_axis() const { - return m_axis; + /// \brief Sets the negative indices mode. + void set_negative_indices_mode(NegativeIndicesMode mode) { + m_negative_indices_mode = mode; + } + /// \return The negative indices mode. + NegativeIndicesMode get_negative_indices_mode() const { + return m_negative_indices_mode; } - void set_axis(int64_t axis); protected: - int64_t m_axis; - -private: - friend void inline resolve_axis(OneHot* op); + NegativeIndicesMode m_negative_indices_mode; }; -} // namespace v1 +} // namespace v16 } // namespace op + +OPENVINO_API +std::ostream& operator<<(std::ostream& s, const op::v16::OneHot::NegativeIndicesMode& mode); + +template <> +class OPENVINO_API AttributeAdapter + : public EnumAttributeAdapterBase { +public: + AttributeAdapter(op::v16::OneHot::NegativeIndicesMode& value) + : EnumAttributeAdapterBase(value) {} + + OPENVINO_RTTI("AttributeAdapter"); + ~AttributeAdapter() override; +}; + } // namespace ov diff --git a/src/core/include/openvino/op/util/one_hot_base.hpp b/src/core/include/openvino/op/util/one_hot_base.hpp new file mode 100644 index 00000000000000..194862c0fc0c6c --- /dev/null +++ b/src/core/include/openvino/op/util/one_hot_base.hpp @@ -0,0 +1,57 @@ +// Copyright (C) 2018-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include "openvino/op/op.hpp" +#include "openvino/op/util/attr_types.hpp" + +namespace ov { +namespace op { 
+namespace util { +class OPENVINO_API OneHotBase : public Op { +public: + OPENVINO_OP("OneHot", "util"); + + /// \brief Constructs a one-hot operation. + OneHotBase() = default; + + /// \brief Constructs a one-hot operation. + /// + /// \param indices Input tensor containing indices. + /// \param depth Specifies number of classes and the size of one-hot dimension. + /// \param on_value Specifies value that the locations in output tensor represented + /// by indices in input take. + /// \param off_value Specifies value that the locations in output tensor not + /// represented + /// by indices in input take. + /// \param axis Axis along which one-hot representation in added. + OneHotBase(const Output& indices, + const Output& depth, + const Output& on_value, + const Output& off_value, + int64_t axis); + + bool visit_attributes(AttributeVisitor& visitor) override; + void validate_and_infer_types() override; + std::shared_ptr clone_with_new_inputs(const OutputVector& new_args) const override; + + /// \return The index of the one-hot axis. + const int64_t& get_axis() const { + return m_axis; + } + + /// @brief Sets the index of the one-hot axis. + /// @param axis The index of the one-hot axis. 
+ void set_axis(int64_t axis); + +protected: + int64_t m_axis; + +private: + friend void inline resolve_axis(OneHotBase* op); +}; +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/core/include/openvino/opsets/opset16_tbl.hpp b/src/core/include/openvino/opsets/opset16_tbl.hpp index 39d3d5d1d80889..6d01ab1d13cb22 100644 --- a/src/core/include/openvino/opsets/opset16_tbl.hpp +++ b/src/core/include/openvino/opsets/opset16_tbl.hpp @@ -19,3 +19,4 @@ _OPENVINO_OP_REG(ISTFT, ov::op::v16) _OPENVINO_OP_REG(SegmentMax, ov::op::v16) _OPENVINO_OP_REG(SparseFillEmptyRows, ov::op::v16) _OPENVINO_OP_REG(AvgPool, ov::op::v16) +_OPENVINO_OP_REG(OneHot, ov::op::v16) diff --git a/src/core/reference/include/openvino/reference/one_hot.hpp b/src/core/reference/include/openvino/reference/one_hot.hpp index 7ae708c7ce84b3..10261e3962a675 100644 --- a/src/core/reference/include/openvino/reference/one_hot.hpp +++ b/src/core/reference/include/openvino/reference/one_hot.hpp @@ -5,6 +5,7 @@ #pragma once #include "openvino/core/shape.hpp" +#include "openvino/op/one_hot.hpp" namespace ov { namespace reference { @@ -16,7 +17,9 @@ void one_hot(const INPUT_TYPE* indices, const size_t depth, const int64_t one_hot_axis, const char* on_value, - const char* off_value) { + const char* off_value, + const op::v16::OneHot::NegativeIndicesMode mode) { + const bool is_mode_normalize = mode == op::v16::OneHot::NegativeIndicesMode::NORMALIZE; const size_t num_ind = shape_size(indices_shape); // Step 1: Set off_value to the output. 
for (auto p = out; p < out + num_ind * depth * out_elem_size; p += out_elem_size) @@ -31,11 +34,14 @@ void one_hot(const INPUT_TYPE* indices, // Step 2: Write on_value at needed positions for (size_t outer_i = 0; outer_i < num_ind; outer_i += inner_block) { for (size_t inner_i = 0; inner_i < inner_block; inner_i++) { - auto input_val = indices[outer_i + inner_i]; - // Negative indices are ignored - if ((input_val >= 0) && (static_cast(input_val) < depth)) { - auto oh_index = static_cast(input_val); - size_t output_offset = out_elem_size * (outer_i * depth + inner_i + oh_index * inner_block); + const int64_t input_val = static_cast(indices[outer_i + inner_i]); + const int64_t depth_i64 = static_cast(depth); + const int64_t actual_index = (input_val < 0 && is_mode_normalize) ? depth_i64 + input_val : input_val; + const int64_t max_valid = depth_i64 - 1; + + if (actual_index >= 0 && actual_index <= max_valid) { + const size_t output_offset = + out_elem_size * (outer_i * depth + inner_i + static_cast(actual_index) * inner_block); std::copy(on_value, on_value + out_elem_size, out + output_offset); } } diff --git a/src/core/shape_inference/include/one_hot_shape_inference.hpp b/src/core/shape_inference/include/one_hot_shape_inference.hpp index d661879cc9a31a..10419d23dac9ef 100644 --- a/src/core/shape_inference/include/one_hot_shape_inference.hpp +++ b/src/core/shape_inference/include/one_hot_shape_inference.hpp @@ -10,7 +10,6 @@ namespace ov { namespace op { namespace util { - template struct GetNotNegative { const Node* m_op; @@ -23,9 +22,8 @@ struct GetNotNegative { return static_cast(v); } }; -} // namespace util -namespace v1 { -void inline resolve_axis(OneHot* op) { + +void inline resolve_axis(OneHotBase* op) { if (op->get_input_size() < 1) { return; } @@ -33,12 +31,12 @@ void inline resolve_axis(OneHot* op) { if (indices_shape.rank().is_static()) { op->m_axis = ov::util::try_normalize_axis(op->m_axis, indices_shape.rank() + 1, *op); } -} +} // namespace util 
template > -std::vector shape_infer(const OneHot* op, - const std::vector& input_shapes, - const ITensorAccessor& ta = make_tensor_accessor()) { +std::vector shape_infer_base(const OneHotBase* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { NODE_VALIDATION_CHECK(op, input_shapes.size() == 4); using DimType = typename T::value_type; const auto& indices_shape = input_shapes[0]; @@ -77,6 +75,25 @@ std::vector shape_infer(const OneHot* op, } return output_shapes; } +} // namespace util + +namespace v1 { +template > +std::vector shape_infer(const OneHot* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + return util::shape_infer_base(op, input_shapes, ta); +} } // namespace v1 + +namespace v16 { +template > +std::vector shape_infer(const OneHot* op, + const std::vector& input_shapes, + const ITensorAccessor& ta = make_tensor_accessor()) { + return util::shape_infer_base(op, input_shapes, ta); +} +} // namespace v16 + } // namespace op } // namespace ov diff --git a/src/core/src/op/one_hot.cpp b/src/core/src/op/one_hot.cpp index 240c78e657a418..5243fd0ac8ce24 100644 --- a/src/core/src/op/one_hot.cpp +++ b/src/core/src/op/one_hot.cpp @@ -25,7 +25,8 @@ struct Evaluate : element::NoAction { const int64_t one_hot_axis, const char* const on_value, const char* const off_value, - const int64_t axis) { + const int64_t axis, + const v16::OneHot::NegativeIndicesMode mode) { reference::one_hot(indices.data(), indices_shape, output_data, @@ -33,76 +34,23 @@ struct Evaluate : element::NoAction { one_hot_axis, axis, on_value, - off_value); + off_value, + mode); return true; } }; -} // namespace one_hot - -namespace v1 { -OneHot::OneHot(const Output& indices, - const Output& depth, - const Output& on_value, - const Output& off_value, - int64_t axis) - : Op({indices, depth, on_value, off_value}), - m_axis(axis) { - mark_as_precision_sensitive(input(1)); - constructor_validate_and_infer_types(); 
-} - -void OneHot::validate_and_infer_types() { - OV_OP_SCOPE(v1_OneHot_validate_and_infer_types); - const auto& indices_et = get_input_element_type(0); - const auto& depth_et = get_input_element_type(1); - const auto& on_value_et = get_input_element_type(2); - const auto& off_value_et = get_input_element_type(3); - - NODE_VALIDATION_CHECK(this, - indices_et.is_dynamic() || indices_et.is_integral(), - "Indices must be integral element type."); - - NODE_VALIDATION_CHECK(this, - depth_et.is_dynamic() || depth_et.is_integral(), - "Depth must be integral element type."); - - NODE_VALIDATION_CHECK(this, - on_value_et.compatible(off_value_et), - "on_value element type must be compatible with off_value element type."); - - const auto& indices_shape = get_input_partial_shape(0); - const auto& depth_shape = get_input_partial_shape(1); - const auto& on_value_shape = get_input_partial_shape(2); - const auto& off_value_shape = get_input_partial_shape(3); - - std::vector input_shapes = {indices_shape, depth_shape, on_value_shape, off_value_shape}; - resolve_axis(this); - const auto output_shapes = shape_infer(this, input_shapes); - - set_output_type(0, on_value_et, output_shapes[0]); -} - -bool OneHot::visit_attributes(AttributeVisitor& visitor) { - OV_OP_SCOPE(v1_OneHot_visit_attributes); - visitor.on_attribute("axis", m_axis); - return true; -} -std::shared_ptr OneHot::clone_with_new_inputs(const OutputVector& new_args) const { - OV_OP_SCOPE(v1_OneHot_clone_with_new_inputs); - check_new_args_count(this, new_args); - return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), m_axis); -} - -bool OneHot::evaluate(TensorVector& outputs, const TensorVector& inputs) const { - OV_OP_SCOPE(v1_OneHot_evaluate); +static bool evaluate_impl(const util::OneHotBase* const op, + TensorVector& outputs, + const TensorVector& inputs, + const v16::OneHot::NegativeIndicesMode mode) { OPENVINO_ASSERT(inputs.size() == 4 && outputs.size() == 1); const auto output_shape 
= - shape_infer(this, ov::util::get_tensors_partial_shapes(inputs), make_tensor_accessor(inputs)) + shape_infer_base(op, ov::util::get_tensors_partial_shapes(inputs), make_tensor_accessor(inputs)) .front() .to_shape(); - const auto axis = get_axis(); + const auto axis = op->get_axis(); OPENVINO_ASSERT(axis >= 0 && static_cast(axis) < output_shape.size(), "Invalid axis value."); const auto depth = v0::Constant{inputs[1]}.cast_vector()[0]; @@ -118,7 +66,7 @@ bool OneHot::evaluate(TensorVector& outputs, const TensorVector& inputs) const { auto& output = outputs[0]; output.set_shape(output_shape); using namespace ov::element; - return IF_TYPE_OF(v1_OneHot_evaluate, + return IF_TYPE_OF(oneHot_evaluate, OV_PP_ET_LIST(i32, i64), one_hot::Evaluate, indices.get_element_type(), @@ -129,7 +77,42 @@ bool OneHot::evaluate(TensorVector& outputs, const TensorVector& inputs) const { output.get_shape()[axis], on_value, off_value, - axis); + axis, + mode); +} +} // namespace one_hot + +namespace v1 { +OneHot::OneHot(const Output& indices, + const Output& depth, + const Output& on_value, + const Output& off_value, + int64_t axis) + : OneHotBase(indices, depth, on_value, off_value, axis) { + mark_as_precision_sensitive(input(1)); + constructor_validate_and_infer_types(); +} + +void OneHot::validate_and_infer_types() { + OV_OP_SCOPE(v1_OneHot_validate_and_infer_types); + OneHotBase::validate_and_infer_types(); +} + +bool OneHot::visit_attributes(AttributeVisitor& visitor) { + OV_OP_SCOPE(v1_OneHot_visit_attributes); + OneHotBase::visit_attributes(visitor); + return true; +} + +std::shared_ptr OneHot::clone_with_new_inputs(const OutputVector& new_args) const { + OV_OP_SCOPE(v1_OneHot_clone_with_new_inputs); + check_new_args_count(this, new_args); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), m_axis); +} + +bool OneHot::evaluate(TensorVector& outputs, const TensorVector& inputs) const { + OV_OP_SCOPE(v1_OneHot_evaluate); + return 
one_hot::evaluate_impl(this, outputs, inputs, v16::OneHot::NegativeIndicesMode::IGNORE_NEGATIVE); } bool OneHot::has_evaluate() const { @@ -143,10 +126,76 @@ bool OneHot::has_evaluate() const { } } -void OneHot::set_axis(int64_t axis) { - m_axis = axis; - resolve_axis(this); -} } // namespace v1 + +namespace v16 { +OneHot::OneHot(const Output& indices, + const Output& depth, + const Output& on_value, + const Output& off_value, + int64_t axis, + NegativeIndicesMode mode) + : OneHotBase(indices, depth, on_value, off_value, axis), + m_negative_indices_mode(mode) { + mark_as_precision_sensitive(input(1)); + constructor_validate_and_infer_types(); +} + +void OneHot::validate_and_infer_types() { + OV_OP_SCOPE(v16_OneHot_validate_and_infer_types); + OneHotBase::validate_and_infer_types(); +} + +bool OneHot::visit_attributes(AttributeVisitor& visitor) { + OV_OP_SCOPE(v16_OneHot_visit_attributes); + OneHotBase::visit_attributes(visitor); + visitor.on_attribute("negative_indices_mode", m_negative_indices_mode); + return true; +} + +std::shared_ptr OneHot::clone_with_new_inputs(const OutputVector& new_args) const { + OV_OP_SCOPE(v16_OneHot_clone_with_new_inputs); + check_new_args_count(this, new_args); + return std::make_shared(new_args.at(0), + new_args.at(1), + new_args.at(2), + new_args.at(3), + m_axis, + m_negative_indices_mode); +} + +bool OneHot::evaluate(TensorVector& outputs, const TensorVector& inputs) const { + OV_OP_SCOPE(v16_OneHot_evaluate); + return one_hot::evaluate_impl(this, outputs, inputs, get_negative_indices_mode()); +} + +bool OneHot::has_evaluate() const { + OV_OP_SCOPE(v16_OneHot_has_evaluate); + switch (get_input_element_type(0)) { + case element::i32: + case element::i64: + return true; + default: + return false; + } +} + +} // namespace v16 } // namespace op -} // namespace ov + +std::ostream& operator<<(std::ostream& s, const op::v16::OneHot::NegativeIndicesMode& mode) { + return s << as_string(mode); +} + +template <> +OPENVINO_API EnumNames& 
EnumNames::get() { + static auto enum_names = EnumNames( + "op::v16::OneHot::NegativeIndicesMode", + {{"ignore_negative", op::v16::OneHot::NegativeIndicesMode::IGNORE_NEGATIVE}, + {"normalize", op::v16::OneHot::NegativeIndicesMode::NORMALIZE}}); + return enum_names; +} + +AttributeAdapter::~AttributeAdapter() = default; + +} // namespace ov \ No newline at end of file diff --git a/src/core/src/op/util/one_hot_base.cpp b/src/core/src/op/util/one_hot_base.cpp new file mode 100644 index 00000000000000..af8c5ea82af4a3 --- /dev/null +++ b/src/core/src/op/util/one_hot_base.cpp @@ -0,0 +1,72 @@ +// Copyright (C) 2018-2025 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include "openvino/op/util/one_hot_base.hpp" + +#include "itt.hpp" +#include "one_hot_shape_inference.hpp" + +namespace ov { +namespace op { +namespace util { + +OneHotBase::OneHotBase(const Output& indices, + const Output& depth, + const Output& on_value, + const Output& off_value, + int64_t axis) + : Op({indices, depth, on_value, off_value}), + m_axis(axis) {} + +void OneHotBase::validate_and_infer_types() { + OV_OP_SCOPE(util_OneHotBase_validate_and_infer_types); + const auto& indices_et = get_input_element_type(0); + const auto& depth_et = get_input_element_type(1); + const auto& on_value_et = get_input_element_type(2); + const auto& off_value_et = get_input_element_type(3); + + NODE_VALIDATION_CHECK(this, + indices_et.is_dynamic() || indices_et.is_integral(), + "Indices must be integral element type."); + + NODE_VALIDATION_CHECK(this, + depth_et.is_dynamic() || depth_et.is_integral(), + "Depth must be integral element type."); + + NODE_VALIDATION_CHECK(this, + on_value_et.compatible(off_value_et), + "on_value element type must be compatible with off_value element type."); + + const auto& indices_shape = get_input_partial_shape(0); + const auto& depth_shape = get_input_partial_shape(1); + const auto& on_value_shape = get_input_partial_shape(2); + const auto& off_value_shape = 
get_input_partial_shape(3); + + std::vector input_shapes = {indices_shape, depth_shape, on_value_shape, off_value_shape}; + resolve_axis(this); + const auto output_shapes = shape_infer_base(this, input_shapes); + + set_output_type(0, on_value_et, output_shapes[0]); +} + +bool OneHotBase::visit_attributes(AttributeVisitor& visitor) { + OV_OP_SCOPE(util_OneHotBase_visit_attributes); + visitor.on_attribute("axis", m_axis); + return true; +} + +void OneHotBase::set_axis(int64_t axis) { + m_axis = axis; + resolve_axis(this); +} + +std::shared_ptr OneHotBase::clone_with_new_inputs(const OutputVector& new_args) const { + OV_OP_SCOPE(util_OneHotBase_clone_with_new_inputs); + check_new_args_count(this, new_args); + return std::make_shared(new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), m_axis); +} + +} // namespace util +} // namespace op +} // namespace ov diff --git a/src/core/tests/opset.cpp b/src/core/tests/opset.cpp index 8671772f2dbc88..cfb4636d2a2a5d 100644 --- a/src/core/tests/opset.cpp +++ b/src/core/tests/opset.cpp @@ -77,7 +77,7 @@ INSTANTIATE_TEST_SUITE_P(opset, OpsetTestParams{ov::get_opset13, 186}, OpsetTestParams{ov::get_opset14, 188}, OpsetTestParams{ov::get_opset15, 199}, - OpsetTestParams{ov::get_opset16, 8}), + OpsetTestParams{ov::get_opset16, 9}), OpsetTestNameGenerator{}); class MyOpOld : public ov::op::Op { diff --git a/src/core/tests/pass/constant_folding.cpp b/src/core/tests/pass/constant_folding.cpp index f74aec8bdd44f0..9f6c1de5b5c11b 100644 --- a/src/core/tests/pass/constant_folding.cpp +++ b/src/core/tests/pass/constant_folding.cpp @@ -2952,7 +2952,8 @@ TEST(constant_folding, constant_v1_variadic_split_axis_1_3_splits_neg_length) { res3_values); } -TEST(constant_folding, constant_v1_one_hot) { +template +void OneHotConstantFoldingGenericTest(const TOpFunc& op_func) { const vector indices{0, 1, 2}; const float on_value = 1.123f; const float off_value = 0.321f; @@ -2963,15 +2964,15 @@ TEST(constant_folding, constant_v1_one_hot) { 
const auto off_const = ov::op::v0::Constant::create(element::f32, Shape{}, {off_value}); int64_t axis = 1; - auto one_hot_v1 = make_shared(indices_const, depth_const, on_const, off_const, axis); - auto f = make_shared(one_hot_v1, ParameterVector{}); + auto one_hot = op_func(indices_const, depth_const, on_const, off_const, axis); + auto f = make_shared(one_hot, ParameterVector{}); run_constant_folding(f); - ASSERT_EQ(count_ops_of_type(f), 0); + ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto res = get_result_constant(f); + std::shared_ptr res = get_result_constant(f); ASSERT_TRUE(res); ASSERT_EQ((Shape{3, 3}), res->get_output_shape(0)); @@ -2980,6 +2981,31 @@ TEST(constant_folding, constant_v1_one_hot) { res->get_vector()); } +TEST(constant_folding, constant_v1_one_hot) { + OneHotConstantFoldingGenericTest([](const Output& indices, + const Output& depth, + const Output& on_value, + const Output& off_value, + int64_t axis) { + return make_shared(indices, depth, on_value, off_value, axis); + }); +} + +TEST(constant_folding, constant_v16_one_hot) { + OneHotConstantFoldingGenericTest([](const Output& indices, + const Output& depth, + const Output& on_value, + const Output& off_value, + int64_t axis) { + return make_shared(indices, + depth, + on_value, + off_value, + axis, + op::v16::OneHot::NegativeIndicesMode::NORMALIZE); + }); +} + TEST(constant_folding, constant_v1_one_hot_negative_axes) { const vector indices{0, 2, 3, 1}; const int32_t on_value = 4; diff --git a/src/core/tests/type_prop/one_hot.cpp b/src/core/tests/type_prop/one_hot.cpp index fce91e289f4f87..517dd4da223921 100644 --- a/src/core/tests/type_prop/one_hot.cpp +++ b/src/core/tests/type_prop/one_hot.cpp @@ -14,39 +14,44 @@ using namespace std; using namespace ov; using namespace testing; -TEST(type_prop, one_hot_v1_output_shape) { +template +class OneHotTest : public TypePropOpTest {}; + +TYPED_TEST_SUITE_P(OneHotTest); + +TYPED_TEST_P(OneHotTest, one_hot_output_shape) { 
auto indices = make_shared(element::i64, Shape{3}); auto depth = ov::op::v0::Constant::create(element::i64, Shape{}, {2}); auto on_value = ov::op::v0::Constant::create(element::u32, Shape{}, {5}); auto off_value = ov::op::v0::Constant::create(element::u32, Shape{}, {10}); int64_t axis = -1; - auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); + auto ont_hot = this->make_op(indices, depth, on_value, off_value, axis); ASSERT_EQ(ont_hot->get_element_type(), element::u32); ASSERT_EQ(ont_hot->get_shape(), (Shape{3, 2})); auto dyn_indices = make_shared(element::i64, PartialShape{{1, 3}}); - auto dyn_ont_hot = make_shared(dyn_indices, depth, on_value, off_value, axis); + auto dyn_ont_hot = this->make_op(dyn_indices, depth, on_value, off_value, axis); ASSERT_EQ(dyn_ont_hot->get_output_element_type(0), element::u32); ASSERT_EQ(dyn_ont_hot->get_output_partial_shape(0), (PartialShape{{1, 3}, 2})); } -TEST(type_prop, one_hot_v1_output_shape_2) { +TYPED_TEST_P(OneHotTest, one_hot_output_shape_2) { auto indices = make_shared(element::i64, Shape{1, 3, 2, 3}); auto depth = ov::op::v0::Constant::create(element::i64, Shape{}, {4}); auto on_value = ov::op::v0::Constant::create(element::f32, Shape{}, {1.0f}); auto off_value = ov::op::v0::Constant::create(element::f32, Shape{}, {0.0f}); int64_t axis = 3; - auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); + auto ont_hot = this->make_op(indices, depth, on_value, off_value, axis); ASSERT_EQ(ont_hot->get_element_type(), element::f32); ASSERT_EQ(ont_hot->get_shape(), (Shape{1, 3, 2, 4, 3})); auto dyn_indices = make_shared(element::i64, PartialShape{1, {3, 5}, 2, 3}); - auto dyn_ont_hot = make_shared(dyn_indices, depth, on_value, off_value, axis); + auto dyn_ont_hot = this->make_op(dyn_indices, depth, on_value, off_value, axis); ASSERT_EQ(dyn_ont_hot->get_output_element_type(0), element::f32); ASSERT_EQ(dyn_ont_hot->get_output_partial_shape(0), (PartialShape{1, {3, 5}, 2, 4, 3})); } -TEST(type_prop, 
one_hot_v1_indices_symbols) { +TYPED_TEST_P(OneHotTest, one_hot_indices_symbols) { auto ind_shape = PartialShape{-1, {3, 5}, 2, 3}; auto symbols = set_shape_symbols(ind_shape); @@ -59,7 +64,7 @@ TEST(type_prop, one_hot_v1_indices_symbols) { PartialShape expected_shape{-1, 4, {3, 5}, 2, 3}; ov::TensorSymbol expected_symbols = {symbols[0], nullptr, symbols[1], symbols[2], symbols[3]}; - auto dyn_one_hot = make_shared(dyn_indices, depth, on_value, off_value, axis); + auto dyn_one_hot = this->make_op(dyn_indices, depth, on_value, off_value, axis); const auto& out_shape = dyn_one_hot->get_output_partial_shape(0); EXPECT_EQ(dyn_one_hot->get_output_element_type(0), element::f32); @@ -67,7 +72,7 @@ TEST(type_prop, one_hot_v1_indices_symbols) { EXPECT_EQ(get_shape_symbols(out_shape), expected_symbols); } -TEST(type_prop, one_hot_v1_depth_shape_of_value) { +TYPED_TEST_P(OneHotTest, one_hot_depth_shape_of_value) { auto ind_shape = PartialShape{-1, {3, 5}, 2, 3}; set_shape_symbols(ind_shape); @@ -85,14 +90,14 @@ TEST(type_prop, one_hot_v1_depth_shape_of_value) { PartialShape expected_shape{-1, 4, {3, 5}, 2, 3}; - auto dyn_one_hot = make_shared(dyn_indices, depth, on_value, off_value, axis); + auto dyn_one_hot = this->make_op(dyn_indices, depth, on_value, off_value, axis); const auto& out_shape = dyn_one_hot->get_output_partial_shape(0); EXPECT_EQ(dyn_one_hot->get_output_element_type(0), element::f32); EXPECT_EQ(out_shape, expected_shape); } -TEST(type_prop, one_hot_v1_depth_value_symbol) { +TYPED_TEST_P(OneHotTest, one_hot_depth_value_symbol) { auto ind_shape = PartialShape{-1, {3, 5}, 2, 3}; auto symbols = set_shape_symbols(ind_shape); @@ -114,7 +119,7 @@ TEST(type_prop, one_hot_v1_depth_value_symbol) { PartialShape expected_shape{-1, {4, 6}, {3, 5}, 2, 3}; ov::TensorSymbol expected_symbols{symbols[0], depth_symbol, symbols[1], symbols[2], symbols[3]}; - auto dyn_one_hot = make_shared(dyn_indices, depth, on_value, off_value, axis); + auto dyn_one_hot = 
this->make_op(dyn_indices, depth, on_value, off_value, axis); const auto& out_shape = dyn_one_hot->get_output_partial_shape(0); EXPECT_EQ(dyn_one_hot->get_output_element_type(0), element::f32); @@ -122,7 +127,7 @@ TEST(type_prop, one_hot_v1_depth_value_symbol) { EXPECT_EQ(get_shape_symbols(out_shape), expected_symbols); } -TEST(type_prop, one_hot_v1_output_symbols) { +TYPED_TEST_P(OneHotTest, one_hot_output_symbols) { auto ind_shape = PartialShape{-1, {3, 5}, 2, 3}; auto symbols = set_shape_symbols(ind_shape); @@ -135,7 +140,7 @@ TEST(type_prop, one_hot_v1_output_symbols) { PartialShape expected_shape{-1, 4, {3, 5}, 2, 3}; ov::TensorSymbol expected_symbols{symbols[0], nullptr, symbols[1], symbols[2], symbols[3]}; - auto dyn_one_hot = make_shared(dyn_indices, depth, on_value, off_value, axis); + auto dyn_one_hot = this->make_op(dyn_indices, depth, on_value, off_value, axis); const auto& out_shape = dyn_one_hot->get_output_partial_shape(0); EXPECT_EQ(dyn_one_hot->get_output_element_type(0), element::f32); @@ -143,13 +148,13 @@ TEST(type_prop, one_hot_v1_output_symbols) { EXPECT_EQ(get_shape_symbols(out_shape), expected_symbols); } -TEST(type_prop, one_hot_v1_default_constructor) { +TYPED_TEST_P(OneHotTest, one_hot_default_constructor) { auto indices = make_shared(element::i64, Shape{1, 3, 2, 3}); auto depth = ov::op::v0::Constant::create(element::i64, Shape{}, {4}); auto on_value = ov::op::v0::Constant::create(element::f32, Shape{}, {1.0f}); auto off_value = ov::op::v0::Constant::create(element::f32, Shape{}, {0.0f}); int64_t axis = 3; - auto ont_hot = make_shared(); + auto ont_hot = this->make_op(); ont_hot->set_argument(0, indices); ont_hot->set_argument(1, depth); @@ -165,14 +170,14 @@ TEST(type_prop, one_hot_v1_default_constructor) { EXPECT_EQ(ont_hot->get_shape(), (Shape{1, 3, 2, 4, 3})); } -TEST(type_prop, one_hot_v1_indices_elem_not_integral) { +TYPED_TEST_P(OneHotTest, one_hot_indices_elem_not_integral) { auto indices = make_shared(element::f16, Shape{2, 2}); 
auto depth = make_shared(element::i64, Shape{}); auto on_value = make_shared(element::u32, Shape{}); auto off_value = make_shared(element::u32, Shape{}); int64_t axis = -1; try { - auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); + auto ont_hot = this->make_op(indices, depth, on_value, off_value, axis); // Should have thrown, so fail if it didn't FAIL() << "Incorrect indices element type not detected"; } catch (const ov::Exception& error) { @@ -182,14 +187,14 @@ TEST(type_prop, one_hot_v1_indices_elem_not_integral) { } } -TEST(type_prop, one_hot_v1_depth_elem_not_integral) { +TYPED_TEST_P(OneHotTest, one_hot_depth_elem_not_integral) { auto indices = make_shared(element::i64, Shape{2, 2}); auto depth = make_shared(element::f16, Shape{}); auto on_value = make_shared(element::u32, Shape{}); auto off_value = make_shared(element::u32, Shape{}); int64_t axis = -1; try { - auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); + auto ont_hot = this->make_op(indices, depth, on_value, off_value, axis); // Should have thrown, so fail if it didn't FAIL() << "Incorrect depth element type not detected"; } catch (const ov::Exception& error) { @@ -199,26 +204,26 @@ TEST(type_prop, one_hot_v1_depth_elem_not_integral) { } } -TEST(type_prop, one_hot_v1_negative_depth) { +TYPED_TEST_P(OneHotTest, one_hot_negative_depth) { auto indices = make_shared(element::i32, Shape{2, 2}); auto depth = ov::op::v0::Constant::create(element::i64, Shape{}, {-4}); auto on_value = ov::op::v0::Constant::create(element::f32, Shape{}, {1.0f}); auto off_value = ov::op::v0::Constant::create(element::f32, Shape{}, {0.0f}); int64_t axis = -1; - OV_EXPECT_THROW(auto ont_hot = make_shared(indices, depth, on_value, off_value, axis), + OV_EXPECT_THROW(auto ont_hot = this->make_op(indices, depth, on_value, off_value, axis), ov::Exception, HasSubstr("can't be negative.")); } -TEST(type_prop, one_hot_v1_on_off_values_not_compatible) { +TYPED_TEST_P(OneHotTest, 
one_hot_on_off_values_not_compatible) { auto indices = make_shared(element::i64, Shape{2, 2}); auto depth = make_shared(element::i64, Shape{}); auto on_value = make_shared(element::bf16, Shape{}); auto off_value = make_shared(element::f16, Shape{}); int64_t axis = -1; try { - auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); + auto ont_hot = this->make_op(indices, depth, on_value, off_value, axis); // Should have thrown, so fail if it didn't FAIL() << "Incompatible on/off element types not detected"; } catch (const ov::Exception& error) { @@ -229,14 +234,14 @@ TEST(type_prop, one_hot_v1_on_off_values_not_compatible) { } } -TEST(type_prop, one_hot_v1_depth_not_scalar) { +TYPED_TEST_P(OneHotTest, one_hot_depth_not_scalar) { auto indices = make_shared(element::i64, Shape{2, 2}); auto depth = make_shared(element::i64, Shape{1}); auto on_value = make_shared(element::bf16, Shape{}); auto off_value = make_shared(element::bf16, Shape{}); int64_t axis = -1; try { - auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); + auto ont_hot = this->make_op(indices, depth, on_value, off_value, axis); // Should have thrown, so fail if it didn't FAIL() << "Not scalar depth input not detected."; } catch (const ov::Exception& error) { @@ -246,14 +251,14 @@ TEST(type_prop, one_hot_v1_depth_not_scalar) { } } -TEST(type_prop, one_hot_v1_on_value_not_scalar) { +TYPED_TEST_P(OneHotTest, one_hot_on_value_not_scalar) { auto indices = make_shared(element::i64, Shape{2, 2}); auto depth = make_shared(element::i64, Shape{}); auto on_value = make_shared(element::bf16, Shape{2}); auto off_value = make_shared(element::bf16, Shape{}); int64_t axis = -1; try { - auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); + auto ont_hot = this->make_op(indices, depth, on_value, off_value, axis); // Should have thrown, so fail if it didn't FAIL() << "Not scalar on_value input not detected."; } catch (const ov::Exception& error) { @@ -263,14 +268,14 @@ 
TEST(type_prop, one_hot_v1_on_value_not_scalar) { } } -TEST(type_prop, one_hot_v1_off_value_not_scalar) { +TYPED_TEST_P(OneHotTest, one_hot_off_value_not_scalar) { auto indices = make_shared(element::i64, Shape{2, 2}); auto depth = make_shared(element::i64, Shape{}); auto on_value = make_shared(element::bf16, Shape{}); auto off_value = make_shared(element::bf16, Shape{3}); int64_t axis = -1; try { - auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); + auto ont_hot = this->make_op(indices, depth, on_value, off_value, axis); // Should have thrown, so fail if it didn't FAIL() << "Not scalar off_value input not detected."; } catch (const ov::Exception& error) { @@ -280,32 +285,54 @@ TEST(type_prop, one_hot_v1_off_value_not_scalar) { } } -TEST(type_prop, one_hot_v1_out_types_1) { +TYPED_TEST_P(OneHotTest, one_hot_out_types_1) { auto indices = make_shared(element::i32, Shape{3, 2}); auto depth = ov::op::v0::Constant::create(element::i32, Shape{}, {2}); int64_t axis = -1; auto on_value = ov::op::v0::Constant::create(element::f32, Shape{}, {-3.3}); auto off_value = ov::op::v0::Constant::create(element::f32, Shape{}, {-10.12}); - auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); + auto ont_hot = this->make_op(indices, depth, on_value, off_value, axis); ASSERT_EQ(ont_hot->get_element_type(), element::f32); } -TEST(type_prop, one_hot_v1_out_types_2) { +TYPED_TEST_P(OneHotTest, one_hot_out_types_2) { auto indices = make_shared(element::i64, Shape{3, 2}); auto depth = ov::op::v0::Constant::create(element::i32, Shape{}, {2}); int64_t axis = -1; auto on_value = ov::op::v0::Constant::create(element::i32, Shape{}, {-1}); auto off_value = ov::op::v0::Constant::create(element::i32, Shape{}, {7}); - auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); + auto ont_hot = this->make_op(indices, depth, on_value, off_value, axis); ASSERT_EQ(ont_hot->get_element_type(), element::i32); } -TEST(type_prop, one_hot_v1_out_types_3) { 
+TYPED_TEST_P(OneHotTest, one_hot_out_types_3) { auto indices = make_shared(element::i32, Shape{3, 2}); auto depth = ov::op::v0::Constant::create(element::i32, Shape{}, {2}); int64_t axis = -1; auto on_value = ov::op::v0::Constant::create(element::boolean, Shape{}, {true}); auto off_value = ov::op::v0::Constant::create(element::boolean, Shape{}, {false}); - auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); + auto ont_hot = this->make_op(indices, depth, on_value, off_value, axis); ASSERT_EQ(ont_hot->get_element_type(), element::boolean); } + +REGISTER_TYPED_TEST_SUITE_P(OneHotTest, + one_hot_output_shape, + one_hot_output_shape_2, + one_hot_indices_symbols, + one_hot_depth_shape_of_value, + one_hot_depth_value_symbol, + one_hot_output_symbols, + one_hot_default_constructor, + one_hot_indices_elem_not_integral, + one_hot_depth_elem_not_integral, + one_hot_negative_depth, + one_hot_on_off_values_not_compatible, + one_hot_depth_not_scalar, + one_hot_on_value_not_scalar, + one_hot_off_value_not_scalar, + one_hot_out_types_1, + one_hot_out_types_2, + one_hot_out_types_3); + +using OneHotTypes = Types; +INSTANTIATE_TYPED_TEST_SUITE_P(type_prop, OneHotTest, OneHotTypes); diff --git a/src/core/tests/visitors/op/one_hot.cpp b/src/core/tests/visitors/op/one_hot.cpp index e5f2ae9364d683..ce3994cb876a68 100644 --- a/src/core/tests/visitors/op/one_hot.cpp +++ b/src/core/tests/visitors/op/one_hot.cpp @@ -28,3 +28,25 @@ TEST(attributes, one_hot_op) { EXPECT_EQ(g_one_hot->get_axis(), one_hot->get_axis()); } + +TEST(attributes, one_hot_op_v16) { + NodeBuilder::opset().insert(); + auto indices = make_shared(element::i64, Shape{1, 3, 2, 3}); + auto depth = ov::op::v0::Constant::create(element::i64, Shape{}, {4}); + auto on_value = ov::op::v0::Constant::create(element::f32, Shape{}, {1.0f}); + auto off_value = ov::op::v0::Constant::create(element::f32, Shape{}, {0.0f}); + + int64_t axis = 3; + + auto one_hot = make_shared(indices, + depth, + on_value, + off_value, 
+ axis, + op::v16::OneHot::NegativeIndicesMode::NORMALIZE); + NodeBuilder builder(one_hot, {indices, depth, on_value, off_value}); + auto g_one_hot = ov::as_type_ptr(builder.create()); + + EXPECT_EQ(g_one_hot->get_axis(), one_hot->get_axis()); + EXPECT_EQ(g_one_hot->get_negative_indices_mode(), one_hot->get_negative_indices_mode()); +} diff --git a/src/frontends/onnx/docs/supported_ops.md b/src/frontends/onnx/docs/supported_ops.md index 98966fff6f89fd..cb94890ce1cbc5 100644 --- a/src/frontends/onnx/docs/supported_ops.md +++ b/src/frontends/onnx/docs/supported_ops.md @@ -122,7 +122,7 @@ OpenVINO provides support for operations of Default Opset (empty in table below) | |NonMaxSuppression |10 |11, 10 | | | |NonZero |9 |13, 9 | | | |Not |1 |1 | | -| |OneHot |9 |11, 9 | | +| |OneHot |11, 9 |11, 9 | | | |Optional | |15 | | | |OptionalGetElement | |18, 15 | | | |OptionalHasElement | |18, 15 | | diff --git a/src/frontends/onnx/frontend/src/op/onehot.cpp b/src/frontends/onnx/frontend/src/op/onehot.cpp index bffad4d1ce6977..0c2ba28bedb1bd 100644 --- a/src/frontends/onnx/frontend/src/op/onehot.cpp +++ b/src/frontends/onnx/frontend/src/op/onehot.cpp @@ -14,8 +14,9 @@ namespace ov { namespace frontend { namespace onnx { namespace ai_onnx { -namespace opset_1 { -ov::OutputVector onehot(const ov::frontend::onnx::Node& node) { +namespace opset_common { +template +ov::OutputVector onehot_impl(const ov::frontend::onnx::Node& node) { ov::OutputVector inputs{node.get_ov_inputs()}; auto indices = std::make_shared(inputs.at(0), ov::element::i64); auto depth = std::make_shared(reshape::interpret_as_scalar(inputs.at(1)), ov::element::i64); @@ -28,11 +29,30 @@ ov::OutputVector onehot(const ov::frontend::onnx::Node& node) { auto axis = node.get_attribute_value("axis", -1); - return {std::make_shared(indices, depth, on_value, off_value, axis)}; + if constexpr (!NEEDS_NORMALIZED_INDICES_MODE) { + return {std::make_shared(indices, depth, on_value, off_value, axis)}; + } else { + return 
{std::make_shared(indices, + depth, + on_value, + off_value, + axis, + v16::OneHot::NegativeIndicesMode::NORMALIZE)}; + } } - -ONNX_OP("OneHot", OPSET_SINCE(1), ai_onnx::opset_1::onehot); +} // namespace opset_common +namespace opset_1 { +ov::OutputVector onehot(const ov::frontend::onnx::Node& node) { + return ai_onnx::opset_common::onehot_impl(node); +} +ONNX_OP("OneHot", OPSET_RANGE(1, 10), ai_onnx::opset_1::onehot); } // namespace opset_1 +namespace opset_11 { +ov::OutputVector onehot(const ov::frontend::onnx::Node& node) { + return ai_onnx::opset_common::onehot_impl(node); +} +ONNX_OP("OneHot", OPSET_SINCE(11), ai_onnx::opset_11::onehot); +} // namespace opset_11 } // namespace ai_onnx } // namespace onnx } // namespace frontend diff --git a/src/frontends/onnx/tests/__init__.py b/src/frontends/onnx/tests/__init__.py index b300dbd80fc916..0feda34fa7eb30 100644 --- a/src/frontends/onnx/tests/__init__.py +++ b/src/frontends/onnx/tests/__init__.py @@ -89,8 +89,6 @@ def xfail_test(reason="Mark the test as expected to fail", strict=True): xfail_issue_44958 = xfail_test(reason="Expected: Unsupported dynamic op: Interpolate") xfail_issue_44965 = xfail_test(reason="Expected: RuntimeError: value info has no element") xfail_issue_47323 = xfail_test(reason="RuntimeError: The plugin does not support FP64") -xfail_issue_73538 = xfail_test(reason="OneHot: Unsupported negative indices, " - "AssertionError: Mismatched elements.") # Model MSFT issues: xfail_issue_37957 = xfail_test(reason="RuntimeError: OV does not support the following ONNX operations: " "com.microsoft.CropAndResize, com.microsoft.GatherND, " diff --git a/src/frontends/onnx/tests/models/one_hot_negative_indices.prototxt b/src/frontends/onnx/tests/models/one_hot_negative_indices.prototxt new file mode 100644 index 00000000000000..0ddb16693a42aa --- /dev/null +++ b/src/frontends/onnx/tests/models/one_hot_negative_indices.prototxt @@ -0,0 +1,70 @@ +ir_version: 3 +producer_name: "OpenVINO ONNX Frontend" +graph { + 
node { + input: "indices" + input: "depth" + input: "values" + output: "y" + op_type: "OneHot" + } + name: "test_onehot_negative_indices" + initializer { + dims: 1 + data_type: 7 + int64_data: 12 + name: "depth" + } + initializer { + dims: 2 + data_type: 7 + int64_data: 2 + int64_data: 5 + name: "values" + } + input { + name: "indices" + type { + tensor_type { + elem_type: 7 + shape { + dim { + dim_value: 3 + } + } + } + } + } + input { + name: "depth" + type { + tensor_type { + elem_type: 7 + shape { + dim { + dim_value: 1 + } + } + } + } + } + output { + name: "y" + type { + tensor_type { + elem_type: 6 + shape { + dim { + dim_value: 3 + } + dim { + dim_value: 12 + } + } + } + } + } +} +opset_import { + version: 11 +} diff --git a/src/frontends/onnx/tests/onnx_import.in.cpp b/src/frontends/onnx/tests/onnx_import.in.cpp index 228e5dbf3716a1..2eee8b0bb19a35 100644 --- a/src/frontends/onnx/tests/onnx_import.in.cpp +++ b/src/frontends/onnx/tests/onnx_import.in.cpp @@ -2988,6 +2988,19 @@ OPENVINO_TEST(${BACKEND_NAME}, onnx_model_one_hot_without_axis) { test_case.run(); } +OPENVINO_TEST(${BACKEND_NAME}, onnx_model_one_hot_negative_indices) { + auto model = convert_model("one_hot_negative_indices.onnx"); + + std::vector> inputs{{0, -5, -4}}; + std::vector expected_output{5, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, + 2, 5, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 5, 2, 2, 2}; + + auto test_case = ov::test::TestCase(model, s_device); + test_case.add_multiple_inputs(inputs); + test_case.add_expected_output(expected_output); + test_case.run(); +} + OPENVINO_TEST(${BACKEND_NAME}, onnx_model_where) { auto model = convert_model("where.onnx"); diff --git a/src/frontends/onnx/tests/tests_python/test_backend.py b/src/frontends/onnx/tests/tests_python/test_backend.py index 9a32349d3f347d..84ca45e3c4fdfb 100644 --- a/src/frontends/onnx/tests/tests_python/test_backend.py +++ b/src/frontends/onnx/tests/tests_python/test_backend.py @@ -236,10 +236,6 @@ def 
expect_fail(test_case_path, xfail): # type: (str) -> None "OnnxBackendNodeModelTest.test_nesterov_momentum_cpu", "OnnxBackendNodeModelTest.test_momentum_multiple_cpu", ), - ( - xfail_issue_73538, - "OnnxBackendNodeModelTest.test_onehot_negative_indices_cpu", - ), ( xfail_issue_33488, "OnnxBackendNodeModelTest.test_maxunpool_export_with_output_shape_cpu", diff --git a/src/plugins/intel_cpu/src/nodes/one_hot.cpp b/src/plugins/intel_cpu/src/nodes/one_hot.cpp index a02bd150bdf2b7..bd882b9a3050b5 100644 --- a/src/plugins/intel_cpu/src/nodes/one_hot.cpp +++ b/src/plugins/intel_cpu/src/nodes/one_hot.cpp @@ -27,16 +27,20 @@ #include "openvino/op/one_hot.hpp" #include "selective_build.h" #include "shape_inference/custom/one_hot.hpp" +#include "utils/general_utils.h" namespace ov::intel_cpu::node { bool OneHot::isSupportedOperation(const std::shared_ptr& op, std::string& errorMessage) noexcept { try { - const auto oneHot = ov::as_type_ptr(op); - if (!oneHot) { - errorMessage = "Only opset1 OneHot operation is supported"; + if (none_of(op->get_type_info(), + op::v1::OneHot::get_type_info_static(), + op::v16::OneHot::get_type_info_static())) { + errorMessage = "Only OneHot operations from opset1 and opset16 are supported"; return false; } + + const auto* oneHot = ov::as_type(op.get()); if (ov::as_type_ptr(oneHot->get_input_node_shared_ptr(ON_VALUE_ID)) == nullptr) { errorMessage = "Only const 'on_value' input is supported"; return false; @@ -65,6 +69,10 @@ OneHot::OneHot(const std::shared_ptr& op, const GraphContext::CPtr& co depth = depthNode->cast_vector()[0]; } axis = static_cast(oneHot->get_axis()); + if (const auto oneHot_v16 = ov::as_type_ptr(op)) { + is_mode_normalize = + oneHot_v16->get_negative_indices_mode() == ov::op::v16::OneHot::NegativeIndicesMode::NORMALIZE; + } VectorDims srcDims = getInputShapeAtPort(INDICES_ID).getDims(); if (ov::is_scalar(srcDims)) { @@ -134,9 +142,10 @@ void OneHot::one_hot(size_t prefix_size, size_t suffix_size) { const in_type* 
src_dataPtr = &src_data[prefix_idx * suffix_size]; out_type* dst_dataPtr = &dst_data[prefix_idx * depth * suffix_size]; for (std::size_t suffix_idx = 0; suffix_idx < suffix_size; ++suffix_idx, ++src_dataPtr, ++dst_dataPtr) { - auto v = static_cast(*src_dataPtr); - if (v < depth) { - dst_dataPtr[v * suffix_size] = on_val; + const in_type val = *src_dataPtr; + const in_type mapped_val = (val < 0 && is_mode_normalize) ? static_cast(depth) + val : val; + if (mapped_val >= 0 && mapped_val <= static_cast(depth) - 1) { + dst_dataPtr[mapped_val * suffix_size] = on_val; } } }); diff --git a/src/plugins/intel_cpu/src/nodes/one_hot.h b/src/plugins/intel_cpu/src/nodes/one_hot.h index 29ec691bfdefd2..031f0c600efd7c 100644 --- a/src/plugins/intel_cpu/src/nodes/one_hot.h +++ b/src/plugins/intel_cpu/src/nodes/one_hot.h @@ -56,6 +56,7 @@ class OneHot : public Node { mutable Dim depth = Shape::UNDEFINED_DIM; int32_t axis = -1; + bool is_mode_normalize = false; ov::element::Type output_precision; diff --git a/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.cpp b/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.cpp index 9cb2fef328fef1..d95a3e361bcf21 100644 --- a/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/custom/one_hot.cpp @@ -16,7 +16,7 @@ #include "cpu_types.h" #include "openvino/core/except.hpp" #include "openvino/core/type.hpp" -#include "openvino/op/one_hot.hpp" +#include "openvino/op/util/one_hot_base.hpp" #include "shape_inference/shape_inference_cpu.hpp" #include "shape_inference/shape_inference_status.hpp" @@ -42,7 +42,7 @@ Result OneHotShapeInfer::infer(const std::vector(m_op); + auto oneHot = ov::as_type_ptr(m_op); OPENVINO_ASSERT(oneHot, "Unexpected op type in OneHot shape inference factory: ", m_op->get_type_name()); auto axis = oneHot->get_axis(); auto dstShape = oneHot->get_output_partial_shape(0); diff --git a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp 
b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp index d883a5cf6c1777..90fb9af289802b 100644 --- a/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp +++ b/src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp @@ -576,6 +576,7 @@ using IStaticShapeInferFactory = template <> const IStaticShapeInferFactory::TRegistry IStaticShapeInferFactory::registry{ // opset16 + OV_OP_SHAPE_INFER_MASK_REG(op::v16::OneHot, ShapeInferTA, util::bit::mask(1)), OV_OP_SHAPE_INFER_MASK_REG(op::v16::AvgPool, ShapeInferPaddingTA, util::bit::mask()), OV_OP_SHAPE_INFER_MASK_REG(op::v16::ISTFT, ShapeInferTA, util::bit::mask(2, 3, 4)), OV_OP_SHAPE_INFER_MASK_REG(op::v16::SegmentMax, ShapeInferTA, util::bit::mask(1, 2)), diff --git a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/one_hot.cpp b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/one_hot.cpp index 6d92f109fba98b..2b7189dc32f9a2 100644 --- a/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/one_hot.cpp +++ b/src/plugins/intel_cpu/tests/functional/custom/single_layer_tests/one_hot.cpp @@ -11,23 +11,26 @@ using namespace CPUTestUtils; namespace ov { + +using OHMode = ov::op::v16::OneHot::NegativeIndicesMode; namespace test { using oneHotCPUTestParams = - std::tuple, // secondary input type && need to generate depth - size_t, // depth - float, // on_value - float, // off_value - ov::element::Type, // Output precision + std::tuple, // secondary input type && need to generate depth + size_t, // depth + float, // on_value + float, // off_value + ov::element::Type, // Output precision CPUSpecificParams>; class OneHotLayerCPUTest : public testing::WithParamInterface, virtual public SubgraphBaseTest, public CPUTestsBase { public: static std::string getTestCaseName(const testing::TestParamInfo& obj) { - const auto& [inputShape, axis, inputType, depth, onValue, offValue, outPrc, cpuParams] = obj.param; + const auto& [inputShape, axis, mode, inputType, depth, 
onValue, offValue, outPrc, cpuParams] = obj.param; std::ostringstream result; if (inputShape.first.size() != 0) { result << "IS=(" << ov::test::utils::partialShape2str({inputShape.first}) << "_"; @@ -37,6 +40,8 @@ class OneHotLayerCPUTest : public testing::WithParamInterface(Depth); + const uint32_t range = 2 * static_cast(Depth) - 1; + ov::test::utils::InputGenerateData inGenData(start, range, 1, 1); + tensor = ov::test::utils::create_and_fill_tensor(funcInput.get_element_type(), + targetInputStaticShapes[i], + inGenData); + } else if (i == 1) { tensor = ov::Tensor(funcInput.get_element_type(), targetInputStaticShapes[i]); auto *dataPtr = tensor.data(); dataPtr[0] = Depth; @@ -71,8 +82,10 @@ class OneHotLayerCPUTest : public testing::WithParamInterfaceGetParam(); + const auto& [inputShape, _Axis, _Mode, inputType, _Depth, _OnValue, _OffValue, _outType, cpuParams] = + this->GetParam(); Axis = _Axis; + Mode = _Mode; Depth = _Depth; OnValue = _OnValue; OffValue = _OffValue; @@ -129,7 +142,8 @@ class OneHotLayerCPUTest : public testing::WithParamInterface(outType, ov::Shape{ }, OnValue); auto off_value_const = std::make_shared(outType, ov::Shape{ }, OffValue); - auto oneHot = std::make_shared(params[0], depth, on_value_const, off_value_const, Axis); + auto oneHot = + std::make_shared(params[0], depth, on_value_const, off_value_const, Axis, Mode); return makeNgraphFunction(ov::element::i32, params, oneHot, "OneHot"); } void generateDepth() { @@ -139,6 +153,7 @@ class OneHotLayerCPUTest : public testing::WithParamInterface staticInputShapes0D = { const auto testCase_1d = ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes0D)), ::testing::Values(-1, 0), + ::testing::Values(OHMode::IGNORE_NEGATIVE, OHMode::NORMALIZE), ::testing::ValuesIn(secondaryInputTypesStaticCase), ::testing::Values(3), ::testing::Values(1.f), @@ -190,6 +206,7 @@ const std::vector staticInputShapes1D = { const auto testCase_2d_static = ::testing::Combine( 
::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes1D)), ::testing::Values(-1, 0, 1), + ::testing::Values(OHMode::IGNORE_NEGATIVE), ::testing::ValuesIn(secondaryInputTypesStaticCase), ::testing::Values(6), ::testing::Values(1.f), @@ -207,6 +224,7 @@ const std::vector dynamicInputShapes1D = { const auto testCase_2d_dynamic = ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes1D), ::testing::Values(-1, 0, 1), + ::testing::Values(OHMode::IGNORE_NEGATIVE), ::testing::ValuesIn(secondaryInputTypesDynamicCase), ::testing::Values(6), ::testing::Values(1.f), @@ -223,6 +241,7 @@ const std::vector staticInputShapes2D = { const auto testCase_3d_static = ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes2D)), ::testing::Values(-1, 0, 1), + ::testing::Values(OHMode::IGNORE_NEGATIVE), ::testing::ValuesIn(secondaryInputTypesStaticCase), ::testing::Values(4), ::testing::Values(2.f), @@ -241,6 +260,7 @@ const std::vector dynamicInputShapes2D = { const auto testCase_3d_dynamic = ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes2D), ::testing::Values(-1, 0, 1), + ::testing::Values(OHMode::IGNORE_NEGATIVE), ::testing::ValuesIn(secondaryInputTypesDynamicCase), ::testing::Values(4), ::testing::Values(2.f), @@ -257,6 +277,7 @@ const std::vector staticInputShapes3D = { const auto testCase_4d_static = ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes3D)), ::testing::Values(-1, 0, 1, 2), + ::testing::Values(OHMode::IGNORE_NEGATIVE), ::testing::ValuesIn(secondaryInputTypesStaticCase), ::testing::Values(4), ::testing::Values(1.f), @@ -275,6 +296,7 @@ const std::vector dynamicInputShapes3D = { const auto testCase_4d_dynamic = ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes3D), ::testing::Values(-1, 0, 1, 2), + ::testing::Values(OHMode::IGNORE_NEGATIVE), ::testing::ValuesIn(secondaryInputTypesDynamicCase), ::testing::Values(4), ::testing::Values(1.f), @@ 
-291,6 +313,7 @@ const std::vector staticInputShapes4D = { const auto testCase_5d_static = ::testing::Combine( ::testing::ValuesIn(static_shapes_to_test_representation(staticInputShapes4D)), ::testing::Values(-1, 0, 1, 2, 3), + ::testing::Values(OHMode::IGNORE_NEGATIVE), ::testing::ValuesIn(secondaryInputTypesStaticCase), ::testing::Values(4), ::testing::Values(1.f), @@ -309,6 +332,7 @@ const std::vector dynamicInputShapes4D = { const auto testCase_5d_dynamic = ::testing::Combine( ::testing::ValuesIn(dynamicInputShapes4D), ::testing::Values(-1, 0, 1, 2, 3), + ::testing::Values(OHMode::IGNORE_NEGATIVE, OHMode::NORMALIZE), ::testing::ValuesIn(secondaryInputTypesDynamicCase), ::testing::Values(4), ::testing::Values(1.f), diff --git a/src/plugins/intel_cpu/tests/unit/shape_inference_test/one_hot_shape_inference_test.cpp b/src/plugins/intel_cpu/tests/unit/shape_inference_test/one_hot_shape_inference_test.cpp index 912e925fa765c7..b8099261adf281 100644 --- a/src/plugins/intel_cpu/tests/unit/shape_inference_test/one_hot_shape_inference_test.cpp +++ b/src/plugins/intel_cpu/tests/unit/shape_inference_test/one_hot_shape_inference_test.cpp @@ -2,36 +2,42 @@ // SPDX-License-Identifier: Apache-2.0 // +#include "one_hot_shape_inference.hpp" + #include #include "common_test_utils/test_assertions.hpp" -#include "one_hot_shape_inference.hpp" #include "utils.hpp" using namespace ov; using namespace ov::intel_cpu; using namespace testing; -TEST(StaticShapeInferenceTest, OneHotTestConstantInput) { +template +class OneHotStaticShapeInference : public OpStaticShapeInferenceTest {}; + +TYPED_TEST_SUITE_P(OneHotStaticShapeInference); + +TYPED_TEST_P(OneHotStaticShapeInference, OneHotTestConstantInput) { auto indices = std::make_shared(element::i64, PartialShape{-1}); auto depth = op::v0::Constant::create(element::i64, ov::Shape{}, {2}); auto on_value = op::v0::Constant::create(element::u32, ov::Shape{}, {5}); auto off_value = op::v0::Constant::create(element::u32, ov::Shape{}, {10}); 
int64_t axis = -1; - auto ont_hot = std::make_shared(indices, depth, on_value, off_value, axis); + auto ont_hot = this->make_op(indices, depth, on_value, off_value, axis); // Test StaticShape std::vector static_input_shapes = {StaticShape{3}, StaticShape{}, StaticShape{}, StaticShape{}}; const auto static_output_shapes = shape_inference(ont_hot.get(), static_input_shapes); ASSERT_EQ(static_output_shapes[0], (StaticShape{3, 2})); } -TEST(StaticShapeInferenceTest, OneHotTestConstantMap) { +TYPED_TEST_P(OneHotStaticShapeInference, OneHotTestConstantMap) { auto indices = std::make_shared(element::i64, PartialShape{-1}); auto depth = std::make_shared(element::i64, ov::Shape{}); auto on_param = std::make_shared(element::i32, ov::Shape{}); auto off_param = std::make_shared(element::i32, ov::Shape{}); int64_t axis = -1; - auto ont_hot = std::make_shared(indices, depth, on_param, off_param, axis); + auto ont_hot = this->make_op(indices, depth, on_param, off_param, axis); int64_t depth_value[] = {2}; int32_t on_value[] = {1}; @@ -46,8 +52,8 @@ TEST(StaticShapeInferenceTest, OneHotTestConstantMap) { EXPECT_EQ(static_output_shapes[0], (StaticShape{3, 2})); } -TEST(StaticShapeInferenceTest, OneHotTestConstantMapDefaultCtor) { - auto ont_hot = std::make_shared(); +TYPED_TEST_P(OneHotStaticShapeInference, OneHotTestConstantMapDefaultCtor) { + auto ont_hot = this->make_op(); ont_hot->set_axis(-1); int64_t depth_value[] = {2}; @@ -65,13 +71,13 @@ TEST(StaticShapeInferenceTest, OneHotTestConstantMapDefaultCtor) { EXPECT_EQ(static_output_shapes[0], (StaticShape{3, 2})); } -TEST(StaticShapeInferenceTest, OneHotTestConstantMapNegativeDepth) { +TYPED_TEST_P(OneHotStaticShapeInference, OneHotTestConstantMapNegativeDepth) { auto indices = std::make_shared(element::i64, PartialShape{-1}); auto depth = std::make_shared(element::i64, ov::Shape{}); auto on_param = std::make_shared(element::i32, ov::Shape{}); auto off_param = std::make_shared(element::i32, ov::Shape{}); int64_t axis = -1; - 
auto ont_hot = std::make_shared(indices, depth, on_param, off_param, axis); + auto ont_hot = this->make_op(indices, depth, on_param, off_param, axis); int64_t depth_value[] = {-2}; int32_t on_value[] = {1}; @@ -87,3 +93,12 @@ TEST(StaticShapeInferenceTest, OneHotTestConstantMapNegativeDepth) { ov::NodeValidationFailure, HasSubstr("can't be negative")); } + +REGISTER_TYPED_TEST_SUITE_P(OneHotStaticShapeInference, + OneHotTestConstantInput, + OneHotTestConstantMap, + OneHotTestConstantMapDefaultCtor, + OneHotTestConstantMapNegativeDepth); + +using OneHotTypes = Types; +INSTANTIATE_TYPED_TEST_SUITE_P(shape_inference, OneHotStaticShapeInference, OneHotTypes); diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp index c08472654a317d..37734352b9f698 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/primitives_list.hpp @@ -283,6 +283,7 @@ REGISTER_FACTORY(v15, Col2Im); REGISTER_FACTORY(v16, ISTFT); REGISTER_FACTORY(v16, SparseFillEmptyRows); REGISTER_FACTORY(v16, AvgPool); +REGISTER_FACTORY(v16, OneHot); // --------------------------- Supported internal ops --------------------------- // REGISTER_FACTORY(internal, NonMaxSuppressionIEInternal); diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/one_hot.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/one_hot.hpp index 2b151e0cee54a6..e084e294c4b56a 100644 --- a/src/plugins/intel_gpu/include/intel_gpu/primitives/one_hot.hpp +++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/one_hot.hpp @@ -45,12 +45,14 @@ struct one_hot : public primitive_base { const tensor& shape, const int64_t& one_hot_axis, const int64_t& depth, + const bool indices_normalize_mode = false, const float& on_value = 1.0f, const float& off_value = 0.0f) : primitive_base(id, {input}) , shape(shape) , one_hot_axis(one_hot_axis) , depth(depth) + , 
indices_normalize_mode(indices_normalize_mode) , on_value(on_value) , off_value(off_value) {} @@ -61,11 +63,13 @@ struct one_hot : public primitive_base { const tensor& shape, const data_types output_dt, const int64_t& one_hot_axis, + const bool indices_normalize_mode = false, const float& on_value = 1.0f, const float& off_value = 0.0f) : primitive_base(id, {input, input_depth}, 1, {optional_data_type{output_dt}}) , shape(shape) , one_hot_axis(one_hot_axis) + , indices_normalize_mode(indices_normalize_mode) , on_value(on_value) , off_value(off_value) {} @@ -81,12 +85,14 @@ struct one_hot : public primitive_base { const data_types output_dt, const int64_t& one_hot_axis, const int64_t& depth, + const bool indices_normalize_mode = false, const float& on_value = 1.0f, const float& off_value = 0.0f) : primitive_base(id, {input}, 1, {optional_data_type{output_dt}}) , shape(shape) , one_hot_axis(one_hot_axis) , depth(depth) + , indices_normalize_mode(indices_normalize_mode) , on_value(on_value) , off_value(off_value) {} @@ -96,6 +102,8 @@ struct one_hot : public primitive_base { int64_t one_hot_axis = 0; /// @brief The number of classes and thus the size of the one-hot dimension int64_t depth = 0; + /// @brief Negative indices mode, read specification to find out more. + bool indices_normalize_mode = false; /// @brief The locations represented by indices in indices take this value. float on_value = 1.0f; /// @brief all other locations take value this value. 
@@ -106,6 +114,7 @@ struct one_hot : public primitive_base { seed = hash_combine(seed, one_hot_axis); seed = hash_combine(seed, on_value); seed = hash_combine(seed, off_value); + seed = hash_combine(seed, indices_normalize_mode); return seed; } @@ -118,7 +127,8 @@ struct one_hot : public primitive_base { return one_hot_axis == rhs_casted.one_hot_axis && depth == rhs_casted.depth && on_value == rhs_casted.on_value && - off_value == rhs_casted.off_value; + off_value == rhs_casted.off_value && + indices_normalize_mode == rhs_casted.indices_normalize_mode; } void save(BinaryOutputBuffer& ob) const override { @@ -128,6 +138,7 @@ struct one_hot : public primitive_base { ob << depth; ob << on_value; ob << off_value; + ob << indices_normalize_mode; } void load(BinaryInputBuffer& ib) override { @@ -137,6 +148,7 @@ struct one_hot : public primitive_base { ib >> depth; ib >> on_value; ib >> off_value; + ib >> indices_normalize_mode; } }; } // namespace cldnn diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/one_hot.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/one_hot.cpp index 5f0a66112b4da3..a60ce2a03da550 100644 --- a/src/plugins/intel_gpu/src/graph/impls/ocl/one_hot.cpp +++ b/src/plugins/intel_gpu/src/graph/impls/ocl/one_hot.cpp @@ -30,6 +30,7 @@ struct one_hot_impl : typed_primitive_impl_ocl { params.one_hot_axis = primitive->one_hot_axis; params.on_value = primitive->on_value; params.off_value = primitive->off_value; + params.indices_normalize_mode = primitive->indices_normalize_mode; auto output_sizes = impl_param.get_output_layout().get_dims(); diff --git a/src/plugins/intel_gpu/src/graph/one_hot.cpp b/src/plugins/intel_gpu/src/graph/one_hot.cpp index 6c1222baa17b0e..6a17ff2c37a706 100644 --- a/src/plugins/intel_gpu/src/graph/one_hot.cpp +++ b/src/plugins/intel_gpu/src/graph/one_hot.cpp @@ -49,8 +49,8 @@ std::vector one_hot_inst::calc_output_layouts(const one_hot_node& /*node auto desc = impl_param.typed_desc(); auto input_layout = 
impl_param.get_input_layout(0); auto dt = desc->output_data_types[0].value_or(input_layout.data_type); - - ov::op::v1::OneHot op; +// + ov::op::util::OneHotBase op; try { // set_axis also calls resolve_axis method which tries to get input0 partial shape // thus wrap this call with try/catch. @@ -84,7 +84,7 @@ std::vector one_hot_inst::calc_output_layouts(const one_hot_node& /*node } std::vector output_shapes = - ov::op::v1::shape_infer(&op, input_shapes, ov::make_tensor_accessor(const_data)); + ov::op::util::shape_infer_base(&op, input_shapes, ov::make_tensor_accessor(const_data)); return {{output_shapes[0], dt, format::get_default_format(output_shapes[0].size())}}; } diff --git a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/one_hot_ref.cl b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/one_hot_ref.cl index b66f9db9b96982..d6577bea7576e2 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/one_hot_ref.cl +++ b/src/plugins/intel_gpu/src/kernel_selector/cl_kernels/one_hot_ref.cl @@ -35,8 +35,9 @@ KERNEL(one_hot_ref)(const __global INPUT0_TYPE* input, // Put in the 1; ignore bad input values INPUT0_TYPE val = input[GET_COORDS_INDEX(INPUT0, in_coords)]; - if (val >= 0 && val < ONE_HOT_LIMIT) { - out_coords[ONE_HOT_AXIS] = val; + const INPUT0_TYPE mapped_index = (val < 0 && INDICES_NORMALIZE_MODE) ? 
(ONE_HOT_LIMIT + val) : val; + if (mapped_index >= 0 && mapped_index < ONE_HOT_LIMIT) { + out_coords[ONE_HOT_AXIS] = mapped_index; output[GET_COORDS_INDEX(OUTPUT, out_coords)] = TO_OUTPUT_TYPE(ON_VALUE); } } diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/one_hot/one_hot_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/one_hot/one_hot_kernel_base.cpp index ea9ecba4bac7d2..91dbe6f96a57c7 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/one_hot/one_hot_kernel_base.cpp +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/one_hot/one_hot_kernel_base.cpp @@ -14,7 +14,8 @@ JitConstants OneHotKernelBase::GetJitConstants(const one_hot_params& params) con MakeJitConstant("ONE_HOT_AXIS", params.one_hot_axis), MakeJitConstant("ONE_HOT_LIMIT", params.one_hot_limit), MakeJitConstant("ON_VALUE", params.on_value), - MakeJitConstant("OFF_VALUE", params.off_value) + MakeJitConstant("OFF_VALUE", params.off_value), + MakeJitConstant("INDICES_NORMALIZE_MODE", params.indices_normalize_mode) }); return jit; diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/one_hot/one_hot_kernel_base.h b/src/plugins/intel_gpu/src/kernel_selector/kernels/one_hot/one_hot_kernel_base.h index abd76a53ccb2ab..95b75bbd92e6d2 100644 --- a/src/plugins/intel_gpu/src/kernel_selector/kernels/one_hot/one_hot_kernel_base.h +++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/one_hot/one_hot_kernel_base.h @@ -12,12 +12,12 @@ namespace kernel_selector { // one_hot_params //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// struct one_hot_params : public base_params { - one_hot_params() : base_params(KernelType::ONE_HOT), - one_hot_axis(0), one_hot_limit(0), on_value(1.0), off_value(1.0) {} + one_hot_params() : base_params(KernelType::ONE_HOT), one_hot_axis(0), one_hot_limit(0), on_value(1.0), off_value(1.0), indices_normalize_mode(false) {} uint16_t one_hot_axis; int32_t one_hot_limit; 
float on_value; float off_value; + bool indices_normalize_mode; }; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp b/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp index f1d8c1c382de4b..04226df35b366a 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp @@ -2,16 +2,15 @@ // SPDX-License-Identifier: Apache-2.0 // -#include "intel_gpu/plugin/program_builder.hpp" -#include "intel_gpu/plugin/common_utils.hpp" -#include "transformations/utils/utils.hpp" - #include "openvino/op/one_hot.hpp" + +#include "intel_gpu/plugin/common_utils.hpp" +#include "intel_gpu/plugin/program_builder.hpp" #include "intel_gpu/primitives/one_hot.hpp" +#include "transformations/utils/utils.hpp" namespace ov::intel_gpu { - -static void CreateOneHotOp(ProgramBuilder& p, const std::shared_ptr& op) { +static void CreateOneHotOpGeneric(ProgramBuilder& p, const std::shared_ptr& op, bool is_normalize_mode) { validate_inputs_count(op, {4}); auto inputs = p.GetInputInfo(op); std::string layerName = layer_type_name_ID(op); @@ -22,13 +21,16 @@ static void CreateOneHotOp(ProgramBuilder& p, const std::shared_ptr(op->get_input_node_shared_ptr(3)); OPENVINO_ASSERT(on_value_node != nullptr || off_value_node != nullptr || depth_value_node != nullptr, - "[GPU] Unsupported on/off/depth nodes type in ", op->get_friendly_name(), " (", op->get_type_name(), ")"); + "[GPU] Unsupported on/off/depth nodes type in ", + op->get_friendly_name(), + " (", + op->get_type_name(), + ")"); float on_value; float off_value; - if (!ov::op::util::get_single_value(on_value_node, on_value) || - !ov::op::util::get_single_value(off_value_node, off_value)) { + if (!ov::op::util::get_single_value(on_value_node, on_value) || !ov::op::util::get_single_value(off_value_node, off_value)) { OPENVINO_THROW("Unsupported parameter size in ", 
op->get_friendly_name(), " (", op->get_type_name(), ")"); } @@ -58,6 +60,7 @@ static void CreateOneHotOp(ProgramBuilder& p, const std::shared_ptrget_output_element_type(0)), axis, depth, + is_normalize_mode, on_value, off_value); @@ -69,6 +72,7 @@ static void CreateOneHotOp(ProgramBuilder& p, const std::shared_ptrget_output_element_type(0)), axis, + is_normalize_mode, on_value, off_value); @@ -76,6 +80,17 @@ static void CreateOneHotOp(ProgramBuilder& p, const std::shared_ptr& op) { + CreateOneHotOpGeneric(p, op, false); +} + REGISTER_FACTORY_IMPL(v1, OneHot); +static void CreateOneHotOp(ProgramBuilder& p, const std::shared_ptr& op) { + const bool is_normalize_mode = (op->get_negative_indices_mode() == ov::op::v16::OneHot::NegativeIndicesMode::NORMALIZE); + CreateOneHotOpGeneric(p, op, is_normalize_mode); +} + +REGISTER_FACTORY_IMPL(v16, OneHot); + } // namespace ov::intel_gpu diff --git a/src/plugins/intel_gpu/tests/unit/test_cases/one_hot_gpu_test.cpp b/src/plugins/intel_gpu/tests/unit/test_cases/one_hot_gpu_test.cpp index 44088744513aca..a4bb385911aabf 100644 --- a/src/plugins/intel_gpu/tests/unit/test_cases/one_hot_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/unit/test_cases/one_hot_gpu_test.cpp @@ -825,6 +825,61 @@ TEST(one_hot_gpu_i64, bfzyx_ax3) { ASSERT_EQ(test_is_correct, true); } +static void PerformNegativeIndicesModeTest(cldnn::engine& engine, const std::vector& expected, bool is_mode_normalize) { + const int64_t depth = 4; + const int64_t axis = 4; + const float on_value = 3.0f; + const float off_value = 1.0f; + + std::vector indices = {0, -1, -2, 12, -3}; + + auto input_layout = cldnn::layout({cldnn::data_types::i32, cldnn::format::bfyx, cldnn::tensor(1, 1, 5, 1)}); + auto input_mem = engine.allocate_memory(input_layout); + set_values(input_mem, indices); + + cldnn::topology topology; + topology.add(cldnn::input_layout("input", input_layout)); + topology.add(cldnn::one_hot("one_hot", + input_info("input"), + cldnn::tensor(1, 1, 4, 5, 1), + 
cldnn::data_types::f32, + axis, + depth, + is_mode_normalize, + on_value, + off_value)); + + cldnn::network network(engine, topology); + network.set_input_data("input", input_mem); + auto outputs = network.execute(); + auto output = outputs.at("one_hot").get_memory(); + + cldnn::mem_lock output_ptr(output, get_test_stream()); + for (size_t i = 0; i < expected.size(); ++i) { + ASSERT_FLOAT_EQ(output_ptr[i], expected[i]) << "Mismatch at index " << i << ": expected " << expected[i] << ", got " << output_ptr[i] << std::endl; + } +} + +TEST(one_hot_gpu, negative_indices_mode_normalize) { + auto& engine = get_test_engine(); + + std::vector expected = { + 3.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 3.f, 1.f, 1.f, 3.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 3.f, 1.f, 1.f, + }; + + PerformNegativeIndicesModeTest(engine, expected, true); +} + +TEST(one_hot_gpu, negative_indices_mode_ignore_negative) { + auto& engine = get_test_engine(); + + std::vector expected = { + 3.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, + }; + + PerformNegativeIndicesModeTest(engine, expected, false); +} + TEST(one_hot_error, basic_error_wrong_axis) { auto& engine = get_test_engine(); auto input = engine.allocate_memory({ data_types::i32, format::bfyx, tensor{ 1, 1, 1, 1 } }); diff --git a/src/plugins/template/backend/opset_int_tbl.hpp b/src/plugins/template/backend/opset_int_tbl.hpp index 3a83a3539d14c5..e7c9006c3d84f1 100644 --- a/src/plugins/template/backend/opset_int_tbl.hpp +++ b/src/plugins/template/backend/opset_int_tbl.hpp @@ -182,6 +182,7 @@ _OPENVINO_OP_REG(Identity, ov::op::v16) _OPENVINO_OP_REG(ISTFT, ov::op::v16) _OPENVINO_OP_REG(SegmentMax, ov::op::v16) _OPENVINO_OP_REG(SparseFillEmptyRows, ov::op::v16) +_OPENVINO_OP_REG(OneHot, ov::op::v16) _OPENVINO_OP_REG(AUGRUCell, ov::op::internal) _OPENVINO_OP_REG(AUGRUSequence, ov::op::internal) diff --git a/src/plugins/template/tests/functional/op_reference/one_hot.cpp 
b/src/plugins/template/tests/functional/op_reference/one_hot.cpp index 9e4c092b6c0c5c..a6ac08e2570228 100644 --- a/src/plugins/template/tests/functional/op_reference/one_hot.cpp +++ b/src/plugins/template/tests/functional/op_reference/one_hot.cpp @@ -13,14 +13,14 @@ using namespace reference_tests; using namespace ov; namespace { -struct OneHotParams { - OneHotParams(const reference_tests::Tensor& dataTensor, - const int32_t axis, - const reference_tests::Tensor& depthTensor, - const reference_tests::Tensor& onValueTensor, - const reference_tests::Tensor& offValueTensor, - const reference_tests::Tensor& expectedTensor, - const std::string& testcaseName = "") +struct OneHotParams_v1 { + OneHotParams_v1(const reference_tests::Tensor& dataTensor, + const int32_t axis, + const reference_tests::Tensor& depthTensor, + const reference_tests::Tensor& onValueTensor, + const reference_tests::Tensor& offValueTensor, + const reference_tests::Tensor& expectedTensor, + const std::string& testcaseName = "") : dataTensor(dataTensor), axis(axis), depthTensor(depthTensor), @@ -29,6 +29,27 @@ struct OneHotParams { expectedTensor(expectedTensor), testcaseName(testcaseName) {} + std::string ToStr() const { + std::ostringstream result; + result << "dType=" << dataTensor.type; + result << "_dShape=" << dataTensor.shape; + result << "_axis=" << axis; + result << "_deType=" << depthTensor.type; + result << "_deShape=" << depthTensor.shape; + result << "_onType=" << onValueTensor.type; + result << "_onShape=" << onValueTensor.shape; + result << "_offType=" << offValueTensor.type; + result << "_offShape=" << offValueTensor.shape; + result << "_eType=" << expectedTensor.type; + if (testcaseName != "") { + result << "_eShape=" << expectedTensor.shape; + result << "_=" << testcaseName; + } else { + result << "_eShape=" << expectedTensor.shape; + } + return result.str(); + } + reference_tests::Tensor dataTensor; int32_t axis; reference_tests::Tensor depthTensor; @@ -38,39 +59,63 @@ struct 
OneHotParams { std::string testcaseName; }; -class ReferenceOneHotTest : public testing::TestWithParam, public CommonReferenceTest { +struct OneHotParams_v16 : public OneHotParams_v1 { + OneHotParams_v16(const reference_tests::Tensor& dataTensor, + const int32_t axis, + const reference_tests::Tensor& depthTensor, + const reference_tests::Tensor& onValueTensor, + const reference_tests::Tensor& offValueTensor, + const reference_tests::Tensor& expectedTensor, + ov::op::v16::OneHot::NegativeIndicesMode mode, + const std::string& testcaseName = "") + : OneHotParams_v1(dataTensor, axis, depthTensor, onValueTensor, offValueTensor, expectedTensor, testcaseName), + mode(mode) {} + + std::string ToStr() const { + std::string result_v1 = OneHotParams_v1::ToStr(); + if (mode == op::v16::OneHot::NegativeIndicesMode::NORMALIZE) { + result_v1 += "_mode=NORMALIZE"; + } else { + result_v1 += "_mode=IGNORE_NEGATIVE"; + } + return result_v1; + } + + static OneHotParams_v16 From_v1(const OneHotParams_v1& params) { + return OneHotParams_v16(params.dataTensor, + params.axis, + params.depthTensor, + params.onValueTensor, + params.offValueTensor, + params.expectedTensor, + ov::op::v16::OneHot::NegativeIndicesMode::IGNORE_NEGATIVE, + params.testcaseName); + } + + op::v16::OneHot::NegativeIndicesMode mode; +}; + +template +class ReferenceOneHotTest_Base : public testing::TestWithParam, public CommonReferenceTest { public: void SetUp() override { - auto params = GetParam(); + auto params = this->GetParam(); function = CreateFunction(params); inputData = {params.dataTensor.data}; refOutData = {params.expectedTensor.data}; } - static std::string getTestCaseName(const testing::TestParamInfo& obj) { + static std::string getTestCaseName(const testing::TestParamInfo& obj) { auto param = obj.param; - std::ostringstream result; - result << "dType=" << param.dataTensor.type; - result << "_dShape=" << param.dataTensor.shape; - result << "_axis=" << param.axis; - result << "_deType=" << 
param.depthTensor.type; - result << "_deShape=" << param.depthTensor.shape; - result << "_onType=" << param.onValueTensor.type; - result << "_onShape=" << param.onValueTensor.shape; - result << "_offType=" << param.offValueTensor.type; - result << "_offShape=" << param.offValueTensor.shape; - result << "_eType=" << param.expectedTensor.type; - if (param.testcaseName != "") { - result << "_eShape=" << param.expectedTensor.shape; - result << "_=" << param.testcaseName; - } else { - result << "_eShape=" << param.expectedTensor.shape; - } - return result.str(); + return param.ToStr(); } -private: - static std::shared_ptr CreateFunction(const OneHotParams& params) { + virtual std::shared_ptr CreateFunction(const TParams& params) = 0; +}; + +class ReferenceOneHotTest_v1 : public ReferenceOneHotTest_Base { +public: + std::shared_ptr CreateFunction(const OneHotParams_v1& params) override { std::shared_ptr function; const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); const auto depth = std::make_shared(params.depthTensor.type, @@ -88,7 +133,31 @@ class ReferenceOneHotTest : public testing::TestWithParam, public } }; -TEST_P(ReferenceOneHotTest, CompareWithRefs) { +class ReferenceOneHotTest_v16 : public ReferenceOneHotTest_Base { +public: + std::shared_ptr CreateFunction(const OneHotParams_v16& params) override { + std::shared_ptr function; + const auto data = std::make_shared(params.dataTensor.type, params.dataTensor.shape); + const auto depth = std::make_shared(params.depthTensor.type, + params.depthTensor.shape, + params.depthTensor.data.data()); + const auto onValue = std::make_shared(params.onValueTensor.type, + params.onValueTensor.shape, + params.onValueTensor.data.data()); + const auto offValue = std::make_shared(params.offValueTensor.type, + params.offValueTensor.shape, + params.offValueTensor.data.data()); + const auto oneHot = std::make_shared(data, depth, onValue, offValue, params.axis, params.mode); + function = 
std::make_shared(oneHot, ParameterVector{data}); + return function; + } +}; + +TEST_P(ReferenceOneHotTest_v1, CompareWithRefs) { + Exec(); +} + +TEST_P(ReferenceOneHotTest_v16, CompareWithRefs) { Exec(); } @@ -103,83 +172,84 @@ std::vector generateExpectedValues(const Shape& input_shape, std::vector i } template -std::vector generateParams() { +std::vector generateParams_v1() { using T1 = typename element_type_traits::value_type; using T2 = typename element_type_traits::value_type; - std::vector params{ - OneHotParams(reference_tests::Tensor(ET1, {}, std::vector{2}), - 0, - reference_tests::Tensor(ET1, {}, std::vector{3}), - reference_tests::Tensor(ET2, {}, std::vector{1}), - reference_tests::Tensor(ET2, {}, std::vector{0}), - reference_tests::Tensor(ET2, {3}, std::vector{0, 0, 1}), - "one_hot_scalar_2_in_3"), - OneHotParams(reference_tests::Tensor(ET1, {}, std::vector{1}), - 0, - reference_tests::Tensor(ET1, {}, std::vector{3}), - reference_tests::Tensor(ET2, {}, std::vector{1}), - reference_tests::Tensor(ET2, {}, std::vector{0}), - reference_tests::Tensor(ET2, {3}, std::vector{0, 1, 0}), - "one_hot_scalar_1_in_3"), - OneHotParams(reference_tests::Tensor(ET1, {}, std::vector{0}), - 0, - reference_tests::Tensor(ET1, {}, std::vector{3}), - reference_tests::Tensor(ET2, {}, std::vector{1}), - reference_tests::Tensor(ET2, {}, std::vector{0}), - reference_tests::Tensor(ET2, {3}, std::vector{1, 0, 0}), - "one_hot_scalar_0_in_3"), - OneHotParams(reference_tests::Tensor(ET1, {8}, std::vector{2, 1, 0, 0, 2, 2, 1, 0}), - 0, - reference_tests::Tensor(ET1, {}, std::vector{3}), - reference_tests::Tensor(ET2, {}, std::vector{1}), - reference_tests::Tensor(ET2, {}, std::vector{0}), - reference_tests::Tensor(ET2, {3, 8}, std::vector{0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, - 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0}), - "one_hot_vector_0"), - OneHotParams(reference_tests::Tensor(ET1, {8}, std::vector{2, 1, 0, 0, 2, 2, 1, 0}), - 1, - reference_tests::Tensor(ET1, {}, std::vector{3}), - 
reference_tests::Tensor(ET2, {}, std::vector{1}), - reference_tests::Tensor(ET2, {}, std::vector{0}), - reference_tests::Tensor(ET2, {8, 3}, std::vector{0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, - 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0}), - "one_hot_vector_1"), - OneHotParams(reference_tests::Tensor(ET1, {8}, std::vector{2, 1, 0, 0, 3, 2, 1, 0}), - 1, - reference_tests::Tensor(ET1, {}, std::vector{3}), - reference_tests::Tensor(ET2, {}, std::vector{1}), - reference_tests::Tensor(ET2, {}, std::vector{0}), - reference_tests::Tensor(ET2, {8, 3}, std::vector{0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, - 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0}), - "one_hot_vector_1_barely_oob"), - OneHotParams(reference_tests::Tensor(ET1, {3, 3}, std::vector{0, 1, 1, 2, 1, 0, 0, 2, 1}), - 0, - reference_tests::Tensor(ET1, {}, std::vector{3}), - reference_tests::Tensor(ET2, {}, std::vector{1}), - reference_tests::Tensor(ET2, {}, std::vector{0}), - reference_tests::Tensor(ET2, {3, 3, 3}, std::vector{1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, - 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0}), - "one_hot_matrix_0"), - OneHotParams(reference_tests::Tensor(ET1, {6}, std::vector{0, 11, 101, 1001, 10001, 19999}), - 1, - reference_tests::Tensor(ET1, {}, std::vector{20000}), - reference_tests::Tensor(ET2, {}, std::vector{1}), - reference_tests::Tensor(ET2, {}, std::vector{0}), - reference_tests::Tensor( - ET2, - {6, 20000}, - generateExpectedValues({6, 20000}, std::vector{0, 11, 101, 1001, 10001, 19999}, 20000)), - "one_hot_vector_many_categories"), + std::vector params{ + OneHotParams_v1(reference_tests::Tensor(ET1, {}, std::vector{2}), + 0, + reference_tests::Tensor(ET1, {}, std::vector{3}), + reference_tests::Tensor(ET2, {}, std::vector{1}), + reference_tests::Tensor(ET2, {}, std::vector{0}), + reference_tests::Tensor(ET2, {3}, std::vector{0, 0, 1}), + "one_hot_scalar_2_in_3"), + OneHotParams_v1(reference_tests::Tensor(ET1, {}, std::vector{1}), + 0, + reference_tests::Tensor(ET1, {}, std::vector{3}), + 
reference_tests::Tensor(ET2, {}, std::vector{1}), + reference_tests::Tensor(ET2, {}, std::vector{0}), + reference_tests::Tensor(ET2, {3}, std::vector{0, 1, 0}), + "one_hot_scalar_1_in_3"), + OneHotParams_v1(reference_tests::Tensor(ET1, {}, std::vector{0}), + 0, + reference_tests::Tensor(ET1, {}, std::vector{3}), + reference_tests::Tensor(ET2, {}, std::vector{1}), + reference_tests::Tensor(ET2, {}, std::vector{0}), + reference_tests::Tensor(ET2, {3}, std::vector{1, 0, 0}), + "one_hot_scalar_0_in_3"), + OneHotParams_v1(reference_tests::Tensor(ET1, {8}, std::vector{2, 1, 0, 0, 2, 2, 1, 0}), + 0, + reference_tests::Tensor(ET1, {}, std::vector{3}), + reference_tests::Tensor(ET2, {}, std::vector{1}), + reference_tests::Tensor(ET2, {}, std::vector{0}), + reference_tests::Tensor(ET2, {3, 8}, std::vector{0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0}), + "one_hot_vector_0"), + OneHotParams_v1(reference_tests::Tensor(ET1, {8}, std::vector{2, 1, 0, 0, 2, 2, 1, 0}), + 1, + reference_tests::Tensor(ET1, {}, std::vector{3}), + reference_tests::Tensor(ET2, {}, std::vector{1}), + reference_tests::Tensor(ET2, {}, std::vector{0}), + reference_tests::Tensor(ET2, {8, 3}, std::vector{0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, + 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0}), + "one_hot_vector_1"), + OneHotParams_v1(reference_tests::Tensor(ET1, {8}, std::vector{2, 1, 0, 0, 3, 2, 1, 0}), + 1, + reference_tests::Tensor(ET1, {}, std::vector{3}), + reference_tests::Tensor(ET2, {}, std::vector{1}), + reference_tests::Tensor(ET2, {}, std::vector{0}), + reference_tests::Tensor(ET2, {8, 3}, std::vector{0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0}), + "one_hot_vector_1_barely_oob"), + OneHotParams_v1( + reference_tests::Tensor(ET1, {3, 3}, std::vector{0, 1, 1, 2, 1, 0, 0, 2, 1}), + 0, + reference_tests::Tensor(ET1, {}, std::vector{3}), + reference_tests::Tensor(ET2, {}, std::vector{1}), + reference_tests::Tensor(ET2, {}, std::vector{0}), + 
reference_tests::Tensor(ET2, {3, 3, 3}, std::vector{1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, + 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0}), + "one_hot_matrix_0"), + OneHotParams_v1(reference_tests::Tensor(ET1, {6}, std::vector{0, 11, 101, 1001, 10001, 19999}), + 1, + reference_tests::Tensor(ET1, {}, std::vector{20000}), + reference_tests::Tensor(ET2, {}, std::vector{1}), + reference_tests::Tensor(ET2, {}, std::vector{0}), + reference_tests::Tensor( + ET2, + {6, 20000}, + generateExpectedValues({6, 20000}, std::vector{0, 11, 101, 1001, 10001, 19999}, 20000)), + "one_hot_vector_many_categories"), }; return params; } template -std::vector generateParamsFloat() { +std::vector generateParams_Float_v1() { using T1 = typename element_type_traits::value_type; using T2 = typename element_type_traits::value_type; - std::vector params{ - OneHotParams( + std::vector params{ + OneHotParams_v1( reference_tests::Tensor(ET1, {3, 3}, std::vector{0, 1, 1, 2, 1, 0, 0, 2, 1}), 0, reference_tests::Tensor(ET1, {}, std::vector{3}), @@ -193,28 +263,28 @@ std::vector generateParamsFloat() { return params; } -std::vector generateCombinedParams() { - const std::vector> generatedParams{ - generateParams(), - generateParams(), - generateParams(), - generateParams(), - generateParams(), - generateParams(), - generateParams(), - generateParams(), - generateParams(), - generateParams(), - generateParams(), - generateParams(), - generateParamsFloat(), - generateParamsFloat(), - generateParamsFloat(), - generateParamsFloat(), - generateParamsFloat(), - generateParamsFloat(), +std::vector generateCombinedParams_v1() { + const std::vector> generatedParams{ + generateParams_v1(), + generateParams_v1(), + generateParams_v1(), + generateParams_v1(), + generateParams_v1(), + generateParams_v1(), + generateParams_v1(), + generateParams_v1(), + generateParams_v1(), + generateParams_v1(), + generateParams_v1(), + generateParams_v1(), + generateParams_Float_v1(), + generateParams_Float_v1(), + 
generateParams_Float_v1(), + generateParams_Float_v1(), + generateParams_Float_v1(), + generateParams_Float_v1(), }; - std::vector combinedParams; + std::vector combinedParams; for (const auto& params : generatedParams) { combinedParams.insert(combinedParams.end(), params.begin(), params.end()); @@ -222,8 +292,34 @@ std::vector generateCombinedParams() { return combinedParams; } +std::vector generateCombinedParams_v16() { + const auto params_v1 = generateCombinedParams_v1(); + std::vector params_v16; + params_v16.reserve(params_v1.size()); + for (const auto& p : params_v1) { + params_v16.push_back(OneHotParams_v16::From_v1(p)); + } + + params_v16.push_back(OneHotParams_v16( + reference_tests::Tensor(element::Type_t::i32, {8}, std::vector{-1, -2, 0, 0, 2, -1, 1, 0}), + 0, + reference_tests::Tensor(element::Type_t::i32, {}, std::vector{3}), + reference_tests::Tensor(element::Type_t::f32, {}, std::vector{1}), + reference_tests::Tensor(element::Type_t::f32, {}, std::vector{0}), + reference_tests::Tensor(element::Type_t::f32, {3, 8}, std::vector{0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, + 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0}), + op::v16::OneHot::NegativeIndicesMode::NORMALIZE, + "one_hot_normalize_mode")); + return params_v16; +} + +INSTANTIATE_TEST_SUITE_P(smoke_OneHot_With_Hardcoded_Refs, + ReferenceOneHotTest_v1, + testing::ValuesIn(generateCombinedParams_v1()), + ReferenceOneHotTest_v1::getTestCaseName); + INSTANTIATE_TEST_SUITE_P(smoke_OneHot_With_Hardcoded_Refs, - ReferenceOneHotTest, - testing::ValuesIn(generateCombinedParams()), - ReferenceOneHotTest::getTestCaseName); + ReferenceOneHotTest_v16, + testing::ValuesIn(generateCombinedParams_v16()), + ReferenceOneHotTest_v16::getTestCaseName); } // namespace diff --git a/src/tests/functional/plugin/shared/src/single_op/one_hot.cpp b/src/tests/functional/plugin/shared/src/single_op/one_hot.cpp index 7d70a80c2892e9..e83de453932320 100644 --- a/src/tests/functional/plugin/shared/src/single_op/one_hot.cpp +++ 
b/src/tests/functional/plugin/shared/src/single_op/one_hot.cpp @@ -38,7 +38,7 @@ std::string OneHotLayerTest::getTestCaseName(const testing::TestParamInfo