/*
* Copyright (c) 2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @addtogroup NNRt
* @{
*
* @brief Provides a unified interface for AI chip drivers to access OpenHarmony.
* Neural Network Runtime (NNRt) is a cross-chip inference computing runtime environment oriented to the AI field.
* It serves as a bridge between the upper-layer AI inference framework and the underlying acceleration chip to implement cross-chip inference computing of AI models.
* @since 3.2
* @version 1.0
*/
/**
* @file NnrtTypes.idl
*
* @brief Defines the types used in the HDI methods.
*
* @since 3.2
* @version 1.0
*/
/**
* @brief Defines the package path of the NNRt module.
*
* @since 3.2
* @version 1.0
*/
package ohos.hdi.nnrt.v1_0;
/**
* @brief Defines the shared memory data structure.
*
* @since 3.2
* @version 1.0
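 *
 * For illustration, a minimal sketch of how a service might map the valid
 * region that this structure describes, assuming a POSIX environment
 * (MapValidData is a hypothetical helper, not part of this HDI):
 *
 * @code{.cpp}
 * #include <cstdint>
 * #include <sys/mman.h>
 *
 * // Returns a pointer to the valid data region, or nullptr on failure.
 * void* MapValidData(int fd, uint32_t bufferSize, uint32_t offset, uint32_t dataSize)
 * {
 *     if (offset + dataSize > bufferSize) {
 *         return nullptr; // The valid region must lie inside the shared memory.
 *     }
 *     void* base = mmap(nullptr, bufferSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *     if (base == MAP_FAILED) {
 *         return nullptr;
 *     }
 *     // offset selects the start of the valid data; dataSize bytes are valid.
 *     return static_cast<uint8_t*>(base) + offset;
 * }
 * @endcode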
*/
struct SharedBuffer {
/** File descriptor of the shared memory. */
FileDescriptor fd;
/** Size of the shared memory, in bytes. */
unsigned int bufferSize;
/** Offset of the start address of the valid data in the shared memory. */
unsigned int offset;
/** Space occupied by the valid data, in bytes. */
unsigned int dataSize;
};
/**
* @brief Enumerates the AI chip types.
*
* @since 3.2
* @version 1.0
*/
enum DeviceType: int {
/** Other type */
OTHER,
/** CPU chip */
CPU,
/** GPU chip */
GPU,
    /** AI acceleration chip, such as an NPU or a DSP */
ACCELERATOR
};
/**
* @brief Enumerates the AI chip states.
*
* @since 3.2
* @version 1.0
*/
enum DeviceStatus: int {
/** Available. */
AVAILABLE,
/** Busy. The chip in this state may not respond to computing tasks in a timely manner. */
BUSY,
/** Offline. The chip in this state cannot respond to computing tasks. */
OFFLINE,
/** Unknown state. */
UNKNOWN
};
/**
* @brief Enumerates the performance modes for a chip to perform AI computing.
*
* @since 3.2
* @version 1.0
*/
enum PerformanceMode: int {
/** No performance mode is specified. The specific running mode is defined by the chip. */
PERFORMANCE_NONE,
/** Low-performance mode, which provides slow AI computing but low power consumption. */
PERFORMANCE_LOW,
/** Medium-performance mode, which provides moderate computing speed and power consumption. */
PERFORMANCE_MEDIUM,
/** High-performance mode, which provides fast AI computing but high power consumption. */
PERFORMANCE_HIGH,
/** Extreme-performance mode, which provides the fastest AI computing but highest power consumption. */
PERFORMANCE_EXTREME
};
/**
* @brief Enumerates the AI computing task priorities.
*
* @since 3.2
* @version 1.0
*/
enum Priority: int {
/** No task priority is specified. The specific execution policy is defined by the chip. */
PRIORITY_NONE,
/** Low priority. A task with a higher priority will be executed first. */
PRIORITY_LOW,
/** Medium priority. A task with a higher priority will be executed first. */
PRIORITY_MEDIUM,
/** High priority. High-priority tasks are executed first. */
PRIORITY_HIGH
};
/**
* @brief Defines the parameters required for model building.
*
* @since 3.2
* @version 1.0
*/
struct ModelConfig {
/** Whether to run a Float32 model in Float16 precision. */
boolean enableFloat16;
/** Performance mode of the computing task. For details, see {@link PerformanceMode}. */
enum PerformanceMode mode;
/** Priority of the computing task. For details, see {@link Priority}. */
enum Priority priority;
};
/**
* @brief Enumerates the operator data formats. This parameter must be used together with {@link Tensor}.
*
* @since 3.2
* @version 1.0
*/
enum Format : byte {
    /** Initial value; no format is specified. */
    FORMAT_NONE = -1,
    /** NCHW, which indicates the number of data samples, image channels, image height, and image width in sequence. */
    FORMAT_NCHW = 0,
    /** NHWC, which indicates the number of data samples, image height, image width, and image channels in sequence. */
    FORMAT_NHWC = 1
};
/**
* @brief Defines the quantization parameter structure.
*
 * In the following formulas, q is the quantized value, r is the real value, \f$ r_{max} \f$ is the maximum value of the data to be quantized, \f$ r_{min} \f$ is the minimum value of the data to be quantized, and round(x) rounds x to the nearest integer.
\f[
\text{clamp}(x,min,max) =
\begin{cases}
\text{max} & \text{ if } x > \text{ max } \\
\text{min} & \text{ if } x < \text{ min } \\
x & \text{ otherwise } \\
\end{cases}
\f]
* Formula for transforming a real number from a floating-point representation to a fixed-point representation:
\f[
   q = \text{clamp}(\text{round}(\frac{r}{scale}+zeroPoint), min, max)
\f]
* Formula for transforming a real number from a fixed-point representation to a floating-point representation:
\f[
\text{r}= (q-zeroPoint)*scale
\f]
* scale is calculated by using the following formula:
\f[
scale = \frac{r_{max}-r_{min}}{q_{max}-q_{min}}
\f]
* zeroPoint is calculated by using the following formula:
\f[
zeroPoint = round(q_{min}-\frac{r_{min}}{scale})
\f]
 * \f$ q_{min} \f$ and \f$ q_{max} \f$ are calculated by using the following formulas:
\f[
q_{min} = -(1<<(numBits-1))
\f]
\f[
q_{max} = (1<<(numBits-1))-1
\f]
* When \f$ r_{min} \f$ and \f$ r_{max} \f$ are 0, scale and zeroPoint must be 0.
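 *
 * For illustration, a minimal sketch that transcribes the formulas above
 * (Quantize and Dequantize are hypothetical helpers, not part of this HDI):
 *
 * @code{.cpp}
 * #include <algorithm>
 * #include <cmath>
 * #include <cstdint>
 *
 * // q = clamp(round(r / scale + zeroPoint), qMin, qMax)
 * int32_t Quantize(double r, int numBits, int zeroPoint, double scale)
 * {
 *     const long qMin = -(1L << (numBits - 1));
 *     const long qMax = (1L << (numBits - 1)) - 1;
 *     const long q = std::lround(r / scale + zeroPoint);
 *     return static_cast<int32_t>(std::clamp(q, qMin, qMax));
 * }
 *
 * // r = (q - zeroPoint) * scale
 * double Dequantize(int32_t q, int zeroPoint, double scale)
 * {
 *     return (q - zeroPoint) * scale;
 * }
 * @endcode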
*
* @since 3.2
* @version 1.0
*/
struct QuantParam {
    /** Number of quantization bits. */
    int numBits;
    /** Zero point of the quantizer. */
    int zeroPoint;
    /** Scale (step size) of the quantizer. */
double scale;
};
/**
* @brief Enumerates the tensor data types. This parameter must be used together with {@link Tensor}.
*
* @since 3.2
* @version 1.0
*/
enum DataType : byte {
/** Unknown type. */
DATA_TYPE_UNKNOWN = 0,
/** Boolean. */
DATA_TYPE_BOOL = 30,
/** INT8. */
DATA_TYPE_INT8 = 32,
/** INT16. */
DATA_TYPE_INT16 = 33,
/** INT32. */
DATA_TYPE_INT32 = 34,
/** INT64. */
DATA_TYPE_INT64 = 35,
/** UINT8. */
DATA_TYPE_UINT8 = 37,
/** UINT16. */
DATA_TYPE_UINT16 = 38,
/** UINT32. */
DATA_TYPE_UINT32 = 39,
/** UINT64. */
DATA_TYPE_UINT64 = 40,
/** FLOAT16. */
DATA_TYPE_FLOAT16 = 42,
/** FLOAT32. */
DATA_TYPE_FLOAT32 = 43,
/** FLOAT64. */
DATA_TYPE_FLOAT64 = 44,
};
/**
* @brief Defines the input and output tensors of an AI model.
*
* @since 3.2
* @version 1.0
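 *
 * For illustration, a minimal sketch of filling this structure for a Float32
 * NHWC input of shape [1, 224, 224, 3], assuming the C++ types generated from
 * this IDL and a SharedBuffer ("buffer") that already holds the tensor data:
 *
 * @code{.cpp}
 * IOTensor input;
 * input.name = "input_0";              // Hypothetical tensor name.
 * input.dataType = DATA_TYPE_FLOAT32;  // See {@link DataType}.
 * input.dimensions = {1, 224, 224, 3}; // N, H, W, C.
 * input.format = FORMAT_NHWC;          // See {@link Format}.
 * input.data = buffer;                 // See {@link SharedBuffer}.
 * @endcode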
*/
struct IOTensor {
/** Tensor name. */
String name;
/** Data type of the tensor. For details, see {@link DataType}. */
enum DataType dataType;
/** Dimensions of the tensor. */
int[] dimensions;
/** Format of the tensor. For details, see {@link Format}. */
enum Format format;
/** Tensor data, which is stored in the shared memory. For details about the shared memory, see {@link SharedBuffer}. */
struct SharedBuffer data;
};
/**
* @brief Enumerates the quantization types. This parameter must be used together with {@link Node}.
*
* @since 3.2
* @version 1.0
*/
enum QuantType: byte {
    /** No quantization is applied. */
QUANT_TYPE_NONE,
/** INT8 quantization. */
QUANT_TYPE_ALL,
};
/**
* @brief Enumerates the operator types.
*
* @since 3.2
* @version 1.0
*/
enum NodeType : unsigned int {
/** None. */
NODE_TYPE_NONE = 0,
/** Activation function */
NODE_TYPE_ACTIVATION = 2,
/** ADD operator */
NODE_TYPE_ADD_FUSION = 5,
/** ArgMax operator */
NODE_TYPE_ARGMAX_FUSION = 11,
/** AVGPOOL operator */
NODE_TYPE_AVGPOOL_FUSION = 17,
/** BatchToSpaceND operator */
NODE_TYPE_BATCH_TO_SPACE_ND = 22,
/** BiasAdd operator */
NODE_TYPE_BIAS_ADD = 23,
/** Cast operator */
NODE_TYPE_CAST = 28,
/** Concat operator */
NODE_TYPE_CONCAT = 31,
/** Conv2D operator, including common convolution, separable convolution, and group convolution */
NODE_TYPE_CONV2D_FUSION = 35,
/** Two-dimensional deconvolution operator */
NODE_TYPE_CONV2D_TRANSPOSE_FUSION = 36,
/** Div operator */
NODE_TYPE_DIV_FUSION = 47,
    /** Element-wise operator */
NODE_TYPE_ELTWISE = 52,
/** ExpandDims operator */
NODE_TYPE_EXPAND_DIMS = 56,
/** Fill operator */
NODE_TYPE_FILL = 66,
/** FullConnection operator */
NODE_TYPE_FULL_CONNECTION = 67,
/** BatchNorm operator */
NODE_TYPE_FUSED_BATCH_NORM = 68,
/** Gather operator */
NODE_TYPE_GATHER = 69,
/** LayerNorm operator */
NODE_TYPE_LAYER_NORM_FUSION = 75,
/** LessEqual operator */
NODE_TYPE_LESS_EQUAL = 78,
/** MatMul operator */
NODE_TYPE_MATMUL_FUSION = 89,
/** Maximum operator */
NODE_TYPE_MAXIMUM = 90,
/** MaxPool operator */
NODE_TYPE_MAX_POOL_FUSION = 92,
/** Mul operator */
NODE_TYPE_MUL_FUSION = 99,
/** OneHot operator */
NODE_TYPE_ONE_HOT = 105,
/** Pad operator */
NODE_TYPE_PAD_FUSION = 107,
/** Pow operator */
NODE_TYPE_POW_FUSION = 110,
/** PReLU operator */
NODE_TYPE_PRELU_FUSION = 112,
/** QuantDTypeCast operator */
NODE_TYPE_QUANT_DTYPE_CAST = 113,
/** Reduce operator */
NODE_TYPE_REDUCE_FUSION = 118,
/** Reshape operator */
NODE_TYPE_RESHAPE = 119,
/** Resize operator */
NODE_TYPE_RESIZE = 120,
/** Rsqrt operator */
NODE_TYPE_RSQRT = 126,
/** Scale operator */
NODE_TYPE_SCALE_FUSION = 127,
/** Shape operator */
NODE_TYPE_SHAPE = 130,
/** Slice operator */
NODE_TYPE_SLICE_FUSION = 135,
/** Softmax operator */
NODE_TYPE_SOFTMAX = 138,
/** SpaceToBatchND operator */
NODE_TYPE_SPACE_TO_BATCH_ND = 141,
/** Split operator */
NODE_TYPE_SPLIT = 145,
/** Sqrt operator */
NODE_TYPE_SQRT = 146,
/** Squeeze operator */
NODE_TYPE_SQUEEZE = 147,
/** SquaredDifference operator */
NODE_TYPE_SQUARED_DIFFERENCE = 149,
/** Stack operator */
NODE_TYPE_STACK = 150,
/** StridedSlice operator */
NODE_TYPE_STRIDED_SLICE = 151,
/** Sub operator */
NODE_TYPE_SUB_FUSION = 152,
/** Tile operator */
NODE_TYPE_TILE_FUSION = 160,
/** TopK operator */
NODE_TYPE_TOPK_FUSION = 161,
/** Transpose operator */
NODE_TYPE_TRANSPOSE = 162,
/** Unsqueeze operator */
NODE_TYPE_UNSQUEEZE = 165,
};
/**
* @brief Enumerates the resize methods. It must be used together with the {@link Resize} operator.
*
* @since 3.2
* @version 1.0
*/
enum ResizeMethod : byte {
/** Unknown. This is the default value. */
RESIZE_METHOD_UNKNOWN = -1,
/** Bilinear interpolation.
* For example, calculate the value of an unknown function f at point \f$ (x,y) \f$, where \f$ x_1< x < x_2, y_1< y < y_2 \f$.
     * The values of the four coordinate points are \f$ Q_{11} = (x_1, y_1), Q_{12} = (x_1, y_2), Q_{21} = (x_2, y_1) \f$, and \f$ Q_{22} = (x_2, y_2) \f$.
     * \f$ f(Q_{11}), f(Q_{12}), f(Q_{21}) \f$, and \f$ f(Q_{22}) \f$ represent the values of the four points. The value of \f$ f(x,y) \f$ can be calculated by using the following formulas:
\f[
f(x,y_1) = \frac{x_2-x}{x_2-x_1}f(Q_{11})+\frac{x-x_1}{x_2-x_1}f(Q_{21})
\f]
\f[
f(x,y_2) = \frac{x_2-x}{x_2-x_1}f(Q_{12})+\frac{x-x_1}{x_2-x_1}f(Q_{22})
\f]
\f[
f(x,y) = \frac{y_2-y}{y_2-y_1}f(x,y_1)+\frac{y-y_1}{y_2-y_1}f(x,y_2)
\f]
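     * For illustration, a minimal sketch that transcribes the three formulas above
     * (BilinearInterpolate is a hypothetical helper, not part of this HDI):
     * @code{.cpp}
     * // f11, f12, f21, f22 are the values at Q11, Q12, Q21, Q22.
     * double BilinearInterpolate(double x, double y,
     *                            double x1, double x2, double y1, double y2,
     *                            double f11, double f12, double f21, double f22)
     * {
     *     const double fxy1 = (x2 - x) / (x2 - x1) * f11 + (x - x1) / (x2 - x1) * f21;
     *     const double fxy2 = (x2 - x) / (x2 - x1) * f12 + (x - x1) / (x2 - x1) * f22;
     *     return (y2 - y) / (y2 - y1) * fxy1 + (y - y1) / (y2 - y1) * fxy2;
     * }
     * @endcode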
*/
RESIZE_METHOD_LINEAR = 0,
/** Nearest neighbor interpolation.
* For example, calculate the value of an unknown function f at point \f$ (x,y) \f$, where \f$ x_1< x < x_2, y_1< y < y_2 \f$.
     * The values of the four coordinate points are \f$ Q_{11} = (x_1, y_1), Q_{12} = (x_1, y_2), Q_{21} = (x_2, y_1) \f$, and \f$ Q_{22} = (x_2, y_2) \f$.
* Then, the value of the point closest to the point \f$(x,y) \f$ is the value of \f$ f(x,y) \f$.
*/
RESIZE_METHOD_NEAREST = 1,
/** Bicubic interpolation.
* Bicubic interpolation obtains the value of a sampling point by calculating the weighted average of the values of 16 points around the sampling point. This parameter must be used together with cubicCoeff and coordinateTransformMode of {@link Resize}.
* When coordinateTransformMode==COORDINATE_TRANSFORM_MODE_HALF_PIXEL, cubicCoeff is -0.5. In other cases, cubicCoeff is -0.75. The weight function of the interpolation is as follows:
\f[
W(x) =
\begin{cases}
       (\text{cubicCoeff}+2)|x|^3 - (\text{cubicCoeff}+3)|x|^2 +1 , &\text{if } |x| \leq 1; \cr
       \text{cubicCoeff}|x|^3 - 5\text{cubicCoeff}|x|^2 + 8\text{cubicCoeff}|x| - 4\text{cubicCoeff}, &\text{if } 1 \lt |x| \leq 2; \cr
0, &\text{otherwise.}
\end{cases}
\f]
*/
RESIZE_METHOD_CUBIC = 2
};
/**
* @brief Enumerates the coordinate transformation modes. Only the {@link Resize} operator uses this parameter.
* For example, the width coordinates are transformed, where:
* new_i is the ith coordinate of the resized tensor along the x axis.
 * old_i is the corresponding coordinate of the input tensor along the x axis.
* newWidth is the length of the resized tensor along the x axis.
* oldWidth is the length of the input tensor along the x axis.
* old_i can be calculated by using the following formula:
*
 * COORDINATE_TRANSFORM_MODE_ASYMMETRIC: \f$ old_i = newWidth != 0 ? new_i * oldWidth / newWidth : 0 \f$
 * COORDINATE_TRANSFORM_MODE_ALIGN_CORNERS: \f$ old_i = newWidth != 1 ? new_i * (oldWidth - 1) / (newWidth - 1) : 0 \f$
 * COORDINATE_TRANSFORM_MODE_HALF_PIXEL: \f$ old_i = newWidth > 1 ? (new_i + 0.5) * oldWidth / newWidth - 0.5 : 0 \f$
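 *
 * For illustration, a minimal sketch that transcribes the three mappings above
 * (MapToOldCoordinate is a hypothetical helper, not part of this HDI):
 *
 * @code{.cpp}
 * double MapToOldCoordinate(CoordinateTransformMode mode, double newI,
 *                           double oldWidth, double newWidth)
 * {
 *     switch (mode) {
 *         case COORDINATE_TRANSFORM_MODE_ASYMMETRIC:
 *             return newWidth != 0 ? newI * oldWidth / newWidth : 0;
 *         case COORDINATE_TRANSFORM_MODE_ALIGN_CORNERS:
 *             return newWidth != 1 ? newI * (oldWidth - 1) / (newWidth - 1) : 0;
 *         default: // COORDINATE_TRANSFORM_MODE_HALF_PIXEL
 *             return newWidth > 1 ? (newI + 0.5) * oldWidth / newWidth - 0.5 : 0;
 *     }
 * }
 * @endcode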
*
* @since 3.2
* @version 1.0
*/
enum CoordinateTransformMode : byte {
/** Scale based on the ratio without alignment. */
COORDINATE_TRANSFORM_MODE_ASYMMETRIC = 0,
/** Align the four corners of the image. */
COORDINATE_TRANSFORM_MODE_ALIGN_CORNERS = 1,
/** Align with the pixel center. */
COORDINATE_TRANSFORM_MODE_HALF_PIXEL = 2
};
/**
* @brief Enumerates the nearest neighbor interpolation types. It must be used together with the {@link Resize} operator.
*
* @since 3.2
* @version 1.0
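 *
 * For illustration, a minimal sketch of one way to realize each mode with the
 * standard C++ rounding functions (ApplyNearestMode is a hypothetical helper;
 * the NEAREST_MODE_NORMAL case assumes "round off" means rounding half away
 * from zero):
 *
 * @code{.cpp}
 * #include <cmath>
 *
 * double ApplyNearestMode(NearestMode mode, double x)
 * {
 *     switch (mode) {
 *         case NEAREST_MODE_ROUND_HALF_DOWN: return std::ceil(x - 0.5);
 *         case NEAREST_MODE_ROUND_HALF_UP:   return std::floor(x + 0.5);
 *         case NEAREST_MODE_FLOOR:           return std::floor(x);
 *         case NEAREST_MODE_CEIL:            return std::ceil(x);
 *         default:                           return std::round(x); // NEAREST_MODE_NORMAL
 *     }
 * }
 * @endcode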
*/
enum NearestMode : byte {
    /** Round to the nearest integer. */
    NEAREST_MODE_NORMAL = 0,
    /** Round half down: a value exactly halfway between two integers is rounded toward negative infinity. For example, 23.5 is rounded to 23, and −23.5 is rounded to −24. */
    NEAREST_MODE_ROUND_HALF_DOWN = 1,
    /** Round half up: a value exactly halfway between two integers is rounded toward positive infinity. For example, 23.5 is rounded to 24, and −23.5 is rounded to −23. */
    NEAREST_MODE_ROUND_HALF_UP = 2,
/** Round down to the nearest integer. For example, 23.5 is rounded down to 23, and −23.5 is rounded down to −24. */
NEAREST_MODE_FLOOR = 3,
/** Round up to the nearest integer. For example, 23.5 is rounded up to 24, and −23.5 is rounded up to −23. */
NEAREST_MODE_CEIL = 4
};
/**
 * @brief Enumerates the activation function types. Activation functions introduce nonlinearity into neural networks, allowing them to model nonlinear relationships.
* If an operator in the {@link NodeAttrTypes.idl} file has ActivationType parameters, the corresponding activation function will be called after the operator calculation is complete.
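 * For illustration, a minimal sketch of that dispatch for a few of the modes
 * defined below (ApplyActivation is a hypothetical helper, not part of this HDI):
 *
 * @code{.cpp}
 * #include <algorithm>
 * #include <cmath>
 *
 * float ApplyActivation(ActivationType type, float x)
 * {
 *     switch (type) {
 *         case ACTIVATION_TYPE_RELU:    return std::max(x, 0.0f);
 *         case ACTIVATION_TYPE_RELU6:   return std::min(std::max(x, 0.0f), 6.0f);
 *         case ACTIVATION_TYPE_SIGMOID: return 1.0f / (1.0f + std::exp(-x));
 *         default:                      return x; // ACTIVATION_TYPE_NO_ACTIVATION
 *     }
 * }
 * @endcode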
*
* @since 3.2
* @version 1.0
*/
enum ActivationType : byte {
/** No activation function. */
ACTIVATION_TYPE_NO_ACTIVATION = 0,
/**
* ReLU activation function.
* ReLU calculates \f$ max(x_i, 0) \f$ element by element. It outputs the value directly if it is positive; otherwise, it outputs 0.
\f[
\text{ReLU}(x_i) = (x_i)^+ = \max(x_i, 0),
\f]
* \f$ x_i \f$ is the input element.
*/
ACTIVATION_TYPE_RELU = 1,
/**
* Sigmoid activation function.
* Execute the sigmoid activation function element-wise.
* The sigmoid function is defined as follows:
\f[
\text{Sigmoid}(x_i) = \frac{1}{1 + \exp(-x_i)}
\f]
* \f$ x_i \f$ is the input element.
*/
ACTIVATION_TYPE_SIGMOID = 2,
/**
* ReLU6 activation function.
     * ReLU6 is similar to ReLU. The difference is that ReLU6 caps the output at 6: if the input is greater than 6, the output is limited to 6.
* The ReLU6 function is defined as follows:
\f[
\text{ReLU6}(x_i) = \min(\max(0, x_i), 6)
\f]
* \f$ x_i \f$ is the input element.
*/
ACTIVATION_TYPE_RELU6 = 3,
/**
* Exponential Linear Unit (ELU) activation function.
* ELU calculates the ELU for each input element.
* The ELU function is defined as follows:
\f[
ELU(x_{i}) =
\begin{cases}
x_i, &\text{if } x_i \geq 0; \cr
\alpha * (\exp(x_i) - 1), &\text{otherwise.}
\end{cases}
\f]
* \f$ x_i \f$ indicates the input element, and \f$ \alpha \f$ indicates the alpha parameter, which is set by {@link Activation}.
*/
ACTIVATION_TYPE_ELU = 4,
/**
* LeakyReLU activation function.
* The LeakyReLU function is defined as follows:
\f[
\text{LeakyReLU}(x_i) =
\begin{cases}
x_i, &\text{if } x_i \geq 0; \cr
{\alpha} * x_i, &\text{otherwise.}
\end{cases}
\f]
* \f$ x_i \f$ indicates the input element, and \f$ \alpha \f$ indicates the alpha parameter, which is set by {@link Activation}.
*/
ACTIVATION_TYPE_LEAKY_RELU = 5,
/**
* Activation function for calculating the absolute value.
* The function is defined as follows:
\f[
\text{abs}(x_i) = |x_i|
\f]
* \f$ x_i \f$ is the input element.
*/
ACTIVATION_TYPE_ABS = 6,
/**
* ReLU1 activation function.
* The ReLU1 function is defined as follows:
\f[
\text{ReLU1}(x_i)= \min(\max(0, x_i), 1)
\f]
* \f$ x_i \f$ is the input element.
*/
ACTIVATION_TYPE_RELU1 = 7,
/**
* SoftSign activation function.
* The SoftSign function is defined as follows:
\f[
\text{SoftSign}(x_i) = \frac{x_i}{1 + |x_i|}
\f]
* \f$ x_i \f$ is the input.
*/
ACTIVATION_TYPE_SOFTSIGN = 8,
/**
* Softplus activation function.
* Softplus is a smooth approximation to ReLU. It can be used to constrain the output to always be positive.
* The Softplus function is defined as follows:
\f[
\text{Softplus}(x_i) = \log(1 + \exp(x_i))
\f]
* \f$ x_i \f$ is the input element.
*/
ACTIVATION_TYPE_SOFTPLUS = 9,
/**
* Tanh activation function.
* The Tanh function is defined as follows:
\f[
       \text{Tanh}(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1}
\f]
* \f$ x_i \f$ is the input element.
*/
ACTIVATION_TYPE_TANH = 10,
/**
* Scaled exponential Linear Unit (SELU) activation function.
* The SELU function is defined as follows:
\f[
SELU(x_{i}) =
scale *
\begin{cases}
x_{i}, &\text{if } x_{i} \geq 0; \cr
       \alpha * (\exp(x_i) - 1), &\text{otherwise.}
\end{cases}
\f]
* \f$ x_i \f$ is the input element, and \f$ \alpha \f$ and \f$ scale \f$ are predefined constants (\f$ \alpha = 1.67326324 \f$, \f$ scale = 1.05070098 \f$).
*/
ACTIVATION_TYPE_SELU = 11,
/**
* Hard Swish activation function.
*
\f[
\text{Hardswish}(x_{i}) = x_{i} * \frac{ReLU6(x_{i} + 3)}{6}
\f]
* \f$ x_i \f$ is the input element.
*/
ACTIVATION_TYPE_HSWISH = 12,
/**
* Hard sigmoid activation function.
* The hard sigmoid function is defined as follows:
\f[
\text{Hardsigmoid}(x_{i}) = max(0, min(1, \frac{x_{i} + 3}{6}))
\f]
* \f$ x_i \f$ is the input element.
*/
ACTIVATION_TYPE_HSIGMOID = 13,
/**
* ThresholdedReLU activation function.
* ThresholdedReLU is similar to ReLU. The ThresholdedReLU function is defined as follows:
\f[
\text{ThresholdedReLU}(x_i) = \min(\max(0, x_i), t)
\f]
* \f$ x_i \f$ is the input element, and \f$ t \f$ is the maximum value.
*/
ACTIVATION_TYPE_THRESHOLDRELU = 14,
/**
* Linear activation function.
* The Linear function is defined as follows:
\f[
\text{Linear}(x_i) = x_i
\f]
* \f$ x_i \f$ is the input element.
*/
ACTIVATION_TYPE_LINEAR = 15,
/**
* HardTanh activation function.
* The HardTanh function is defined as follows:
\f[
\text{HardTanh}(x_i) =
\begin{cases}
\text{max_val} & \text{ if } x_i > \text{ max_val } \\
\text{min_val} & \text{ if } x_i < \text{ min_val } \\
x_i & \text{ otherwise } \\
\end{cases}
\f]
* \f$ x_i \f$ is the input, \f$ max\_val \f$ is the maximum value, and \f$ min\_val \f$ is the minimum value. The two parameters are set by {@link Activation}.
*/
ACTIVATION_TYPE_HARD_TANH = 16,
/**
* Sign activation function.
* The Sign function is defined as follows:
\f[
Sign(x_i) = \begin{cases} -1, &if\ x_i < 0 \cr
0, &if\ x_i = 0 \cr
1, &if\ x_i > 0\end{cases}
\f]
* \f$ x_i \f$ is the input.
*/
ACTIVATION_TYPE_SIGN = 17,
/**
* Swish activation function.
* The Swish function is defined as follows:
\f[
\text{Swish}(x_i) = x_i * Sigmoid(x_i)
\f]
* \f$ x_i \f$ is the input.
*/
ACTIVATION_TYPE_SWISH = 18,
/**
* Gaussian error linear unit (GELU) activation function.
* The GELU function is defined as follows:
\f[
GELU(x_i) = x_i*P(X < x_i)
\f]
* \f$ x_i \f$ is the input element, and \f$ P \f$ is a cumulative distribution function of the standard Gaussian distribution.
* You need to use the approximate parameter of {@link Activation} to specify whether to use approximation.
*/
ACTIVATION_TYPE_GELU = 19,
/** Unknown */
ACTIVATION_TYPE_UNKNOWN = 20
};
/**
 * @brief Enumerates the reduction modes, each of which collapses a dimension to a single value. It must be used together with the {@link ReduceFusion} operator.
*
* @since 3.2
* @version 1.0
*/
enum ReduceMode : byte {
    /** Reduce a dimension to the mean of its elements. */
    REDUCE_MODE_MEAN = 0,
    /** Reduce a dimension to the maximum of its elements. */
    REDUCE_MODE_MAX = 1,
    /** Reduce a dimension to the minimum of its elements. */
    REDUCE_MODE_MIN = 2,
    /** Reduce a dimension to the product of its elements. */
    REDUCE_MODE_PROD = 3,
    /** Reduce a dimension to the sum of its elements. For example, applying REDUCE_MODE_SUM along a dimension holding [1, 2, 3] yields 6. */
    REDUCE_MODE_SUM = 4,
    /** Reduce a dimension to the sum of the squares of its elements. */
    REDUCE_MODE_SUM_SQUARE = 5,
    /** Reduce a dimension to the sum of the absolute values of its elements. */
    REDUCE_MODE_ASUM = 6,
    /** Reduce a dimension to the logical AND of its elements. */
    REDUCE_MODE_ALL = 7
};
/**
 * @brief Enumerates the element-wise operation types. It must be used together with the {@link Eltwise} operator.
*
* @since 3.2
* @version 1.0
*/
enum EltwiseMode : byte {
    /** Element-wise product of the two input tensors */
    ELTWISE_MODE_PROD = 0,
    /** Element-wise sum of the two input tensors */
    ELTWISE_MODE_SUM = 1,
    /** Element-wise maximum of the two input tensors */
    ELTWISE_MODE_MAXIMUM = 2,
    /** Unknown type */
    ELTWISE_MODE_UNKNOWN = 3
};
/**
 * @brief Enumerates the padding types. It must be used together with {@link AvgPoolFusion}, {@link Conv2DFusion}, and {@link MaxPoolFusion}.
*
* @since 3.2
* @version 1.0
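 *
 * For illustration, a minimal sketch of the output length along one spatial
 * axis under each mode, assuming the usual convolution/pooling conventions
 * (OutputLength is a hypothetical helper; "padding" is the total padding
 * applied along the axis):
 *
 * @code{.cpp}
 * int OutputLength(PadMode mode, int input, int kernel, int stride, int padding)
 * {
 *     switch (mode) {
 *         case PAD_MODE_SAME:  // ceil(input / stride); padding must be 0.
 *             return (input + stride - 1) / stride;
 *         case PAD_MODE_VALID: // Only fully covered positions; padding must be 0.
 *             return (input - kernel) / stride + 1;
 *         default:             // PAD_MODE_PAD: explicit padding >= 0.
 *             return (input + padding - kernel) / stride + 1;
 *     }
 * }
 * @endcode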
*/
enum PadMode : byte {
/**
     * Pads the input with zeros in the height and width directions.
* If this mode is used, the padding parameter of the operator must be greater than or equal to 0.
*/
PAD_MODE_PAD = 0,
/**
     * The output height and width are obtained by dividing the input height and width by the stride and rounding the quotient up to an integer. For example, an input height of 224 with a stride of 2 yields an output height of ceil(224 / 2) = 112.
* If this mode is used, the padding parameter of the operator must be 0.
*/
PAD_MODE_SAME = 1,
/**
     * Returns only the output of valid computations without padding; pixels that the kernel cannot fully cover are discarded.
* If this mode is used, the padding parameter of the operator must be 0.
*/
PAD_MODE_VALID = 2,
};
/**
* @brief Enumerates the algorithms for rounding off decimals. It must be used together with the {@link AvgPoolFusion} operator.
*
* @since 3.2
* @version 1.0
*/
enum RoundMode : byte {
/** Round down to the nearest integer. For example, 23.5 is rounded down to 23, and −23.5 is rounded down to −24. */
ROUND_MODE_FLOOR = 0,
/** Round up to the nearest integer. For example, 23.5 is rounded up to 24, and −23.5 is rounded up to −23. */
ROUND_MODE_CEIL = 1
};
/**
* @brief Enumerates the padding modes. It must be used together with the {@link PadFusion} operator.
*
 * When x is \f$[[1,2,3],[4,5,6],[7,8,9]]\f$ and paddings is \f$[[2,2], [2,2]] \f$, the effect is as follows:
* If paddingMode==PADDING_MODE_CONSTANT and constantValue = 0, the output is as follows:
*
\f$[[0. 0. 0. 0. 0. 0. 0.],\\
[0. 0. 0. 0. 0. 0. 0.],\\
[0. 0. 1. 2. 3. 0. 0.],\\
[0. 0. 4. 5. 6. 0. 0.],\\
[0. 0. 7. 8. 9. 0. 0.],\\
[0. 0. 0. 0. 0. 0. 0.],\\
[0. 0. 0. 0. 0. 0. 0.]]\\ \f$
*
* If paddingMode==PADDING_MODE_REFLECT, the output is as follows:
*
\f$[[9. 8. 7. 8. 9. 8. 7.],\\
[6. 5. 4. 5. 6. 5. 4.],\\
[3. 2. 1. 2. 3. 2. 1.],\\
[6. 5. 4. 5. 6. 5. 4.],\\
[9. 8. 7. 8. 9. 8. 7.],\\
[6. 5. 4. 5. 6. 5. 4.],\\
[3. 2. 1. 2. 3. 2. 1.]]\\ \f$
*
* If paddingMode==PADDING_MODE_SYMMETRIC, the output is as follows:
*
\f$[[5. 4. 4. 5. 6. 6. 5.],\\
[2. 1. 1. 2. 3. 3. 2.],\\
[2. 1. 1. 2. 3. 3. 2.],\\
[5. 4. 4. 5. 6. 6. 5.],\\
[8. 7. 7. 8. 9. 9. 8.],\\
[8. 7. 7. 8. 9. 9. 8.],\\
[5. 4. 4. 5. 6. 6. 5.]]\\ \f$
*
* @since 3.2
* @version 1.0
*/
enum PaddingMode : byte {
/** Constant (0 by default) padding. */
PADDING_MODE_CONSTANT = 0,
    /** Reflection padding, which mirrors the input across its border, excluding the border element itself. */
    PADDING_MODE_REFLECT = 1,
    /** Symmetric padding, which is similar to {@link PADDING_MODE_REFLECT} but includes the border element itself in the mirror copy. */
PADDING_MODE_SYMMETRIC = 2,
/** Reserved. */
PADDING_MODE_RESERVED = 3
};
/** @} */