class ov::op::v8::I420toBGR¶
Overview¶
Color conversion operation from I420 to BGR format.
#include <i420_to_bgr.hpp>
class I420toBGR: public ov::op::util::ConvertColorI420Base
{
public:
// fields
BWDCMP_RTTI_DECLARATION;
// construction
I420toBGR();
I420toBGR(const Output<Node>& arg);
I420toBGR(
const Output<Node>& arg_y,
const Output<Node>& arg_u,
const Output<Node>& arg_v
);
// methods
OPENVINO_OP("I420toBGR", "opset8", util::ConvertColorI420Base);
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& new_args) const;
};
Inherited Members¶
public:
// typedefs
typedef DiscreteTypeInfo type_info_t;
typedef std::map<std::string, Any> RTMap;
// enums
enum ColorConversion;
// methods
virtual void validate_and_infer_types();
void constructor_validate_and_infer_types();
virtual bool visit_attributes(AttributeVisitor&);
virtual const ov::op::AutoBroadcastSpec& get_autob() const;
virtual bool has_evaluate() const;
virtual bool evaluate(
const ov::HostTensorVector& output_values,
const ov::HostTensorVector& input_values
) const;
virtual bool evaluate(
const ov::HostTensorVector& output_values,
const ov::HostTensorVector& input_values,
const EvaluationContext& evaluationContext
) const;
virtual bool evaluate_lower(const ov::HostTensorVector& output_values) const;
virtual bool evaluate_upper(const ov::HostTensorVector& output_values) const;
virtual bool evaluate(
ov::TensorVector& output_values,
const ov::TensorVector& input_values
) const;
virtual bool evaluate(
ov::TensorVector& output_values,
const ov::TensorVector& input_values,
const ov::EvaluationContext& evaluationContext
) const;
virtual bool evaluate_lower(ov::TensorVector& output_values) const;
virtual bool evaluate_upper(ov::TensorVector& output_values) const;
virtual bool evaluate_label(TensorLabelVector& output_labels) const;
virtual bool constant_fold(
OutputVector& output_values,
const OutputVector& inputs_values
);
virtual OutputVector decompose_op() const;
virtual const type_info_t& get_type_info() const = 0;
const char* get_type_name() const;
void set_arguments(const NodeVector& arguments);
void set_arguments(const OutputVector& arguments);
void set_argument(size_t position, const Output<Node>& argument);
void set_output_type(
size_t i,
const element::Type& element_type,
const PartialShape& pshape
);
void set_output_size(size_t output_size);
void invalidate_values();
virtual void revalidate_and_infer_types();
virtual std::string description() const;
const std::string& get_name() const;
void set_friendly_name(const std::string& name);
const std::string& get_friendly_name() const;
virtual bool is_dynamic() const;
size_t get_instance_id() const;
virtual std::ostream& write_description(std::ostream& os, uint32_t depth = 0) const;
const std::vector<std::shared_ptr<Node>>& get_control_dependencies() const;
const std::vector<Node*>& get_control_dependents() const;
void add_control_dependency(std::shared_ptr<Node> node);
void remove_control_dependency(std::shared_ptr<Node> node);
void clear_control_dependencies();
void clear_control_dependents();
void add_node_control_dependencies(std::shared_ptr<Node> source_node);
void add_node_control_dependents(std::shared_ptr<Node> source_node);
void transfer_control_dependents(std::shared_ptr<Node> replacement);
size_t get_output_size() const;
const element::Type& get_output_element_type(size_t i) const;
const element::Type& get_element_type() const;
const Shape& get_output_shape(size_t i) const;
const PartialShape& get_output_partial_shape(size_t i) const;
Output<const Node> get_default_output() const;
Output<Node> get_default_output();
virtual size_t get_default_output_index() const;
size_t no_default_index() const;
const Shape& get_shape() const;
descriptor::Tensor& get_output_tensor(size_t i) const;
descriptor::Tensor& get_input_tensor(size_t i) const;
const std::string& get_output_tensor_name(size_t i) const;
std::set<Input<Node>> get_output_target_inputs(size_t i) const;
size_t get_input_size() const;
const element::Type& get_input_element_type(size_t i) const;
const Shape& get_input_shape(size_t i) const;
const PartialShape& get_input_partial_shape(size_t i) const;
const std::string& get_input_tensor_name(size_t i) const;
Node* get_input_node_ptr(size_t index) const;
std::shared_ptr<Node> get_input_node_shared_ptr(size_t index) const;
Output<Node> get_input_source_output(size_t i) const;
virtual std::shared_ptr<Node> clone_with_new_inputs(const OutputVector& inputs) const = 0;
std::shared_ptr<Node> copy_with_new_inputs(const OutputVector& new_args) const;
std::shared_ptr<Node> copy_with_new_inputs(
const OutputVector& inputs,
const std::vector<std::shared_ptr<Node>>& control_dependencies
) const;
bool has_same_type(std::shared_ptr<const Node> node) const;
RTMap& get_rt_info();
const RTMap& get_rt_info() const;
NodeVector get_users(bool check_is_used = false) const;
virtual size_t get_version() const;
virtual std::shared_ptr<Node> get_default_value() const;
bool operator < (const Node& other) const;
std::vector<Input<Node>> inputs();
std::vector<Input<const Node>> inputs() const;
std::vector<Output<Node>> input_values() const;
std::vector<Output<Node>> outputs();
std::vector<Output<const Node>> outputs() const;
Input<Node> input(size_t input_index);
Input<const Node> input(size_t input_index) const;
Output<Node> input_value(size_t input_index) const;
Output<Node> output(size_t output_index);
Output<const Node> output(size_t output_index) const;
OPENVINO_SUPPRESS_DEPRECATED_START
void set_op_annotations(std::shared_ptr<ngraph::op::util::OpAnnotations> op_annotations);
std::shared_ptr<ngraph::op::util::OpAnnotations> get_op_annotations() const;
OPENVINO_SUPPRESS_DEPRECATED_END
virtual bool match_value(
ov::pass::pattern::Matcher* matcher,
const Output<Node>& pattern_value,
const Output<Node>& graph_value
);
virtual bool match_node(
ov::pass::pattern::Matcher* matcher,
const Output<Node>& graph_value
);
static _OPENVINO_HIDDEN_METHOD const ::ov::Node::type_info_t& get_type_info_static();
virtual const ::ov::Node::type_info_t& get_type_info() const;
OPENVINO_OP("ConvertColorI420Base", "util");
virtual void validate_and_infer_types();
virtual bool visit_attributes(AttributeVisitor& visitor);
Detailed Documentation¶
Color conversion operation from I420 to BGR format.
Input:
The input I420 image can be represented in two ways:
a) Single plane (as it is stored in the file): the I420 height dimension is 1.5x the image height, and the ‘C’ dimension shall be 1.
b) Three separate planes (used this way by many physical video sources): Y, U and V. In this case:
b1) The Y plane has the same height as the image; the ‘C’ dimension equals 1.
b2) The U plane has dimensions ‘H’ = image_h / 2, ‘W’ = image_w / 2, ‘C’ = 1.
b3) The V plane has dimensions ‘H’ = image_h / 2, ‘W’ = image_w / 2, ‘C’ = 1.
Supported element types: u8 or any supported floating-point type.
Output:
The output node has NHWC layout; its spatial dimensions H x W match the image spatial dimensions.
The number of output channels ‘C’ is 3, per the interleaved BGR format: the first channel is B, the last is R.
Conversion of each pixel from I420 (YUV) to RGB space is given by the following formulas:
R = 1.164 * (Y - 16) + 1.596 * (V - 128)
G = 1.164 * (Y - 16) - 0.813 * (V - 128) - 0.391 * (U - 128)
B = 1.164 * (Y - 16) + 2.018 * (U - 128)
The resulting R, G, B values are then clipped to the range [0, 255].
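The per-pixel math above can be illustrated with a small standalone C++ helper. This is only a sketch of the formulas; the function name and clamping approach are hypothetical and not part of the OpenVINO API, which operates on whole tensors.
#include <algorithm>
#include <cstdint>
// Sketch of the I420 (YUV) -> BGR per-pixel formulas above; hypothetical helper, not OpenVINO API.
inline void yuv_pixel_to_bgr(uint8_t y, uint8_t u, uint8_t v,
                             uint8_t& b, uint8_t& g, uint8_t& r)
{
    const float yf = 1.164f * (float(y) - 16.0f);
    const float uf = float(u) - 128.0f;
    const float vf = float(v) - 128.0f;
    // Clip each channel to [0, 255] as described above.
    r = static_cast<uint8_t>(std::clamp(yf + 1.596f * vf, 0.0f, 255.0f));
    g = static_cast<uint8_t>(std::clamp(yf - 0.813f * vf - 0.391f * uf, 0.0f, 255.0f));
    b = static_cast<uint8_t>(std::clamp(yf + 2.018f * uf, 0.0f, 255.0f));
}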
Construction¶
Constructs a conversion operation from a single-plane input image in I420 format. As per the I420 format definition, the node height dimension shall be 1.5 times the image height, so an image (w=640, h=480) is represented by an NHWC shape of {N, 720, 640, 1} (height * 1.5 x width). A usage sketch follows the parameter description below.
Parameters:
arg |
Node that produces the input tensor. The input tensor represents an image in I420 format (YUV). |
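A minimal usage sketch for this constructor, assuming the standard OpenVINO 2.x headers and a hand-built graph; the 640x480 shape follows the example above and the helper function name is hypothetical:
#include <memory>
#include <openvino/core/shape.hpp>
#include <openvino/op/i420_to_bgr.hpp>
#include <openvino/op/parameter.hpp>
std::shared_ptr<ov::Node> make_single_plane_i420_to_bgr()
{
    // Single-plane I420 input for a 640x480 image: NHWC shape {1, 720, 640, 1} (480 * 1.5 = 720).
    auto i420 = std::make_shared<ov::op::v0::Parameter>(ov::element::u8, ov::Shape{1, 720, 640, 1});
    // The output has NHWC shape {1, 480, 640, 3} with interleaved BGR channels.
    return std::make_shared<ov::op::v8::I420toBGR>(i420);
}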
Constructs a conversion operation from a three-plane input image in I420 format. In the general case, the Y, U and V channels of the image can be separated, so the operation needs three input nodes for the Y, U and V planes respectively. Each plane has one channel and is expected in ‘NHWC’ layout. A usage sketch follows the parameter descriptions below.
Parameters:
arg_y |
Node that produces the input tensor for the Y plane (NHWC layout). Shall have W x H dimensions equal to the image dimensions; the ‘C’ dimension equals 1. |
arg_u |
Node that produces the input tensor for the U plane (NHWC layout). ‘H’ is half of the image height, ‘W’ is half of the image width, and the ‘C’ dimension equals 1. |
arg_v |
Node that produces the input tensor for the V plane (NHWC layout). ‘H’ is half of the image height, ‘W’ is half of the image width, and the ‘C’ dimension equals 1. |
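A corresponding sketch for the three-plane constructor, again assuming the standard OpenVINO 2.x headers; the 640x480 shapes and the helper name are illustrative only:
#include <memory>
#include <openvino/core/shape.hpp>
#include <openvino/op/i420_to_bgr.hpp>
#include <openvino/op/parameter.hpp>
std::shared_ptr<ov::Node> make_three_plane_i420_to_bgr()
{
    // Y plane: full resolution, one channel.
    auto y = std::make_shared<ov::op::v0::Parameter>(ov::element::u8, ov::Shape{1, 480, 640, 1});
    // U and V planes: half resolution in both spatial dimensions, one channel each.
    auto u = std::make_shared<ov::op::v0::Parameter>(ov::element::u8, ov::Shape{1, 240, 320, 1});
    auto v = std::make_shared<ov::op::v0::Parameter>(ov::element::u8, ov::Shape{1, 240, 320, 1});
    // The output has NHWC shape {1, 480, 640, 3} with interleaved BGR channels.
    return std::make_shared<ov::op::v8::I420toBGR>(y, u, v);
}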