/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @addtogroup NNRt
 * @{
 *
 * @brief Provides a unified interface for AI chip drivers to access OpenHarmony.
 * Neural Network Runtime (NNRt) is a cross-chip inference computing runtime environment oriented to the AI field.
 *
 * @since 3.2
 * @version 2.0
 */

/**
 * @file NnrtTypes.idl
 *
 * @brief Defines the types used in the HDI methods.
 *
 * @since 3.2
 * @version 2.0
 */

/**
 * @brief Defines the package path of the NNRt module.
 *
 * @since 3.2
 * @version 2.0
 */
package ohos.hdi.nnrt.v2_0;

/**
 * @brief Defines the shared memory data structure.
 *
 * @since 3.2
 * @version 2.0
 */
struct SharedBuffer {
    /** File descriptor of the shared memory. */
    FileDescriptor fd;
    /** Size of the shared memory, in bytes. */
    unsigned int bufferSize;
    /** Offset of the start address of the valid data in the shared memory, in bytes. */
    unsigned int offset;
    /** Space occupied by the valid data, in bytes. */
    unsigned int dataSize;
};
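
/*
 * Example (non-normative): how a client might map the memory described by a
 * SharedBuffer. A minimal C++ sketch assuming a POSIX environment and that
 * 'buffer' is a SharedBuffer received from the NNRt HDI service; the variable
 * names are illustrative and not part of the HDI.
 *
 * @code{.cpp}
 * #include <cstdint>
 * #include <sys/mman.h>
 *
 * void* base = mmap(nullptr, buffer.bufferSize, PROT_READ | PROT_WRITE,
 *                   MAP_SHARED, buffer.fd, 0);
 * if (base != MAP_FAILED) {
 *     // Valid data starts at 'offset' and spans 'dataSize' bytes.
 *     uint8_t* data = static_cast<uint8_t*>(base) + buffer.offset;
 *     // ... read or write data[0 .. buffer.dataSize - 1] ...
 *     munmap(base, buffer.bufferSize);
 * }
 * @endcode
 */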

/**
 * @brief Enumerates the AI chip types.
 *
 * @since 3.2
 * @version 2.0
 */
enum DeviceType: int {
    /** Other types. */
    OTHER,
    /** CPU chip. */
    CPU,
    /** GPU chip. */
    GPU,
    /** AI acceleration chip, such as an NPU or a DSP. */
    ACCELERATOR
};

/**
 * @brief Enumerates the AI chip states.
 *
 * @since 3.2
 * @version 2.0
 */
enum DeviceStatus: int {
    /** Available. */
    AVAILABLE,
    /** Busy. The chip in this state may not respond to computing tasks in a timely manner. */
    BUSY,
    /** Offline. The chip in this state cannot respond to computing tasks. */
    OFFLINE,
    /** Unknown state. */
    UNKNOWN
};

/**
 * @brief Enumerates the performance modes for a chip to perform AI computing.
 *
 * @since 3.2
 * @version 2.0
 */
enum PerformanceMode: int {
    /** No performance mode is specified. The specific running mode is defined by the chip. */
    PERFORMANCE_NONE,
    /** Low-performance mode, which provides slow AI computing but low power consumption. */
    PERFORMANCE_LOW,
    /** Medium-performance mode, which provides moderate computing speed and power consumption. */
    PERFORMANCE_MEDIUM,
    /** High-performance mode, which provides fast AI computing but high power consumption. */
    PERFORMANCE_HIGH,
    /** Extreme-performance mode, which provides the fastest AI computing but highest power consumption. */
    PERFORMANCE_EXTREME
};

/**
 * @brief Enumerates the AI computing task priorities.
 *
 * @since 3.2
 * @version 2.0
 */
enum Priority: int {
    /** No task priority is specified. The specific execution policy is defined by the chip. */
    PRIORITY_NONE,
    /** Low priority. A task with a higher priority is executed first. */
    PRIORITY_LOW,
    /** Medium priority. A task with a higher priority is executed first. */
    PRIORITY_MEDIUM,
    /** High priority. A task with a higher priority is executed first. */
    PRIORITY_HIGH
};

/**
 * @brief Defines the parameters required for model building.
 *
 * @since 3.2
 * @version 2.0
 */
struct ModelConfig {
    /** Whether to run a Float32 model in Float16 precision. */
    boolean enableFloat16;
    /** Performance mode of the computing task. For details, see {@link PerformanceMode}. */
    enum PerformanceMode mode;
    /** Priority of the computing task. For details, see {@link Priority}. */
    enum Priority priority;
    /** Custom attributes of the underlying hardware. They are stored as name:binary-value pairs
      * and parsed by the HDI service.
      */
    Map<String, byte[]> extensions;
};
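
/*
 * Example (non-normative): filling a ModelConfig before building a model. A
 * minimal C++ sketch assuming the usual HDI C++ type mapping (Map<String,
 * byte[]> becomes std::map<std::string, std::vector<int8_t>>); the
 * "FrequencyLevel" extension key is purely illustrative, not a defined key.
 *
 * @code{.cpp}
 * #include <cstdint>
 * #include <string>
 * #include <vector>
 *
 * ModelConfig config;
 * config.enableFloat16 = true;            // run the Float32 model in Float16
 * config.mode = PERFORMANCE_HIGH;         // see PerformanceMode
 * config.priority = PRIORITY_MEDIUM;      // see Priority
 * // Hypothetical vendor attribute, passed as a name:binary-value pair.
 * config.extensions["FrequencyLevel"] = std::vector<int8_t>{3};
 * @endcode
 */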

/**
 * @brief Enumerates the operator data formats. This parameter must be used together with {@link Tensor}.
 *
 * @since 3.2
 * @version 2.0
 */
enum Format : byte {
    /** Format initial value. */
    FORMAT_NONE = -1,
    /** NCHW, which indicates the number of data samples, image channels, image height, and image width in sequence. */
    FORMAT_NCHW = 0,
    /** NHWC, which indicates the number of data samples, image height, image width, and image channels in sequence. */
    FORMAT_NHWC = 1
};

/**
 * @brief Defines the quantization parameter structure.
 *
 * In the following formulas, <b>q</b> is the quantized value, <b>r</b> is the real value, <b>\f$ r_{max} \f$</b>\n
 * is the maximum value of the data to be quantized, <b>\f$ r_{min} \f$</b> is the minimum value of the data
 * to be quantized, and <b>round(x)</b> means to round off <b>x</b> to an integer.
 \f[
  \text{clamp}(x,min,max) =
  \begin{cases}
       \text{max} & \text{ if } x > \text{ max } \\
       \text{min} & \text{ if } x < \text{ min } \\
       x & \text{ otherwise } \\
   \end{cases}
 \f]
 * Formula for transforming a real number from a floating-point representation to a fixed-point representation:
 \f[
    q = \text{clamp}(\text{round}(\frac{r}{scale}+zeroPoint), q_{min}, q_{max})
 \f]
 * Formula for transforming a real number from a fixed-point representation to a floating-point representation:
 \f[
    r = (q-zeroPoint)*scale
 \f]
 * <b>scale</b> is calculated by using the following formula:
 \f[
    scale = \frac{r_{max}-r_{min}}{q_{max}-q_{min}}
 \f]
  * <b>zeroPoint</b> is calculated by using the following formula:
 \f[
    zeroPoint = round(q_{min}-\frac{r_{min}}{scale})
 \f]
  * <b>\f$ q_{min},q_{max} \f$</b> are calculated by using the following formulas:
 \f[
    q_{min} = -(1<<(numBits-1))
 \f]
 \f[
    q_{max} = (1<<(numBits-1))-1
 \f]
 * When \f$ r_{min} \f$ and \f$ r_{max} \f$ are <b>0</b>, <b>scale</b> and <b>zeroPoint</b> must be <b>0</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct QuantParam {
    /** Number of quantized bits. */
    int numBits;
    /** Zero point of the quantizer. */
    int zeroPoint;
    /** Scale (step size) of the quantizer. */
    double scale;
};
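
/*
 * Example (non-normative): applying the formulas above for 8-bit quantization
 * of data in the range [-1.0, 1.0]. A minimal C++ sketch; the function names
 * are illustrative.
 *
 * @code{.cpp}
 * #include <algorithm>
 * #include <cmath>
 * #include <cstdint>
 *
 * // numBits = 8 gives qMin = -128 and qMax = 127.
 * constexpr int32_t qMin = -(1 << 7);
 * constexpr int32_t qMax = (1 << 7) - 1;
 *
 * // For rMin = -1.0 and rMax = 1.0:
 * //   scale     = (1.0 - (-1.0)) / (127 - (-128)) = 2.0 / 255 ≈ 0.007843
 * //   zeroPoint = round(-128 - (-1.0 / scale)) = round(-0.5)
 * //               (-1 or 0, depending on the rounding rule used)
 * int32_t Quantize(double r, double scale, int32_t zeroPoint)
 * {
 *     int32_t q = static_cast<int32_t>(std::lround(r / scale) + zeroPoint);
 *     return std::clamp(q, qMin, qMax);
 * }
 *
 * double Dequantize(int32_t q, double scale, int32_t zeroPoint)
 * {
 *     return (q - zeroPoint) * scale;
 * }
 * @endcode
 */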

/**
 * @brief Enumerates the tensor data types. This parameter must be used together with {@link Tensor}.
 *
 * @since 3.2
 * @version 2.0
 */
enum DataType : byte {
    /** Unknown type. */
    DATA_TYPE_UNKNOWN = 0,
    /** Boolean. */
    DATA_TYPE_BOOL = 30,
    /** INT8. */
    DATA_TYPE_INT8 = 32,
    /** INT16. */
    DATA_TYPE_INT16 = 33,
    /** INT32. */
    DATA_TYPE_INT32 = 34,
    /** INT64. */
    DATA_TYPE_INT64 = 35,
    /** UINT8. */
    DATA_TYPE_UINT8 = 37,
    /** UINT16. */
    DATA_TYPE_UINT16 = 38,
    /** UINT32. */
    DATA_TYPE_UINT32 = 39,
    /** UINT64. */
    DATA_TYPE_UINT64 = 40,
    /** FLOAT16. */
    DATA_TYPE_FLOAT16 = 42,
    /** FLOAT32. */
    DATA_TYPE_FLOAT32 = 43,
    /** FLOAT64. */
    DATA_TYPE_FLOAT64 = 44,
};

/**
 * @brief Defines the input and output tensors of an AI model.
 *
 * @since 3.2
 * @version 2.0
 */
struct IOTensor {
    /** Tensor name. */
    String name;
    /** Data type of the tensor. For details, see {@link DataType}. */
    enum DataType dataType;
    /** Dimensions of the tensor. */
    int[] dimensions;
    /** Format of the tensor. For details, see {@link Format}. */
    enum Format format;
    /** Tensor data, which is stored in the shared memory. For details about the shared memory,\n
      * see {@link SharedBuffer}.
      */
    struct SharedBuffer data;
};
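
/*
 * Example (non-normative): describing a 1 x 3 x 224 x 224 Float32 input image
 * as an IOTensor. A minimal C++ sketch assuming the usual HDI C++ type
 * mapping; 'buffer' is assumed to be a SharedBuffer already holding the
 * tensor data, and the tensor name is illustrative.
 *
 * @code{.cpp}
 * IOTensor input;
 * input.name = "input_image";
 * input.dataType = DATA_TYPE_FLOAT32;
 * input.dimensions = {1, 3, 224, 224};   // N, C, H, W
 * input.format = FORMAT_NCHW;            // matches the dimension order above
 * input.data = buffer;                   // data lives in shared memory
 * @endcode
 */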

/**
 * @brief Enumerates the quantization types. This parameter must be used together with {@link Node}.
 *
 * @since 3.2
 * @version 2.0
 */
enum QuantType: byte {
    /** Quantization is not used. */
    QUANT_TYPE_NONE,
    /** INT8 quantization. */
    QUANT_TYPE_ALL,
};

/**
 * @brief Enumerates the operator types.
 *
 * @since 3.2
 * @version 2.0
 */
enum NodeType : unsigned int {
    /** None. */
    NODE_TYPE_NONE = 0,
    /** Activation function. */
    NODE_TYPE_ACTIVATION = 2,
    /** ADD operator. */
    NODE_TYPE_ADD_FUSION = 5,
    /** ArgMax operator. */
    NODE_TYPE_ARGMAX_FUSION = 11,
    /** AVGPOOL operator. */
    NODE_TYPE_AVGPOOL_FUSION = 17,
    /** BatchToSpaceND operator. */
    NODE_TYPE_BATCH_TO_SPACE_ND = 22,
    /** BiasAdd operator. */
    NODE_TYPE_BIAS_ADD = 23,
    /** Cast operator. */
    NODE_TYPE_CAST = 28,
    /** Concat operator. */
    NODE_TYPE_CONCAT = 31,
    /** Conv2D operator, including common convolution, separable convolution, and group convolution. */
    NODE_TYPE_CONV2D_FUSION = 35,
    /** Two-dimensional deconvolution operator. */
    NODE_TYPE_CONV2D_TRANSPOSE_FUSION = 36,
    /** Div operator. */
    NODE_TYPE_DIV_FUSION = 47,
    /** Element-wise operator. */
    NODE_TYPE_ELTWISE = 52,
    /** ExpandDims operator. */
    NODE_TYPE_EXPAND_DIMS = 56,
    /** Fill operator. */
    NODE_TYPE_FILL = 66,
    /** FullConnection operator. */
    NODE_TYPE_FULL_CONNECTION = 67,
    /** BatchNorm operator. */
    NODE_TYPE_FUSED_BATCH_NORM = 68,
    /** Gather operator. */
    NODE_TYPE_GATHER = 69,
    /** LayerNorm operator. */
    NODE_TYPE_LAYER_NORM_FUSION = 75,
    /** LessEqual operator. */
    NODE_TYPE_LESS_EQUAL = 78,
    /** MatMul operator. */
    NODE_TYPE_MATMUL_FUSION = 89,
    /** Maximum operator. */
    NODE_TYPE_MAXIMUM = 90,
    /** MaxPool operator. */
    NODE_TYPE_MAX_POOL_FUSION = 92,
    /** Mul operator. */
    NODE_TYPE_MUL_FUSION = 99,
    /** OneHot operator. */
    NODE_TYPE_ONE_HOT = 105,
    /** Pad operator. */
    NODE_TYPE_PAD_FUSION = 107,
    /** Pow operator. */
    NODE_TYPE_POW_FUSION = 110,
    /** PReLU operator. */
    NODE_TYPE_PRELU_FUSION = 112,
    /** QuantDTypeCast operator. */
    NODE_TYPE_QUANT_DTYPE_CAST = 113,
    /** Reduce operator. */
    NODE_TYPE_REDUCE_FUSION = 118,
    /** Reshape operator. */
    NODE_TYPE_RESHAPE = 119,
    /** Resize operator. */
    NODE_TYPE_RESIZE = 120,
    /** Rsqrt operator. */
    NODE_TYPE_RSQRT = 126,
    /** Scale operator. */
    NODE_TYPE_SCALE_FUSION = 127,
    /** Shape operator. */
    NODE_TYPE_SHAPE = 130,
    /** Slice operator. */
    NODE_TYPE_SLICE_FUSION = 135,
    /** Softmax operator. */
    NODE_TYPE_SOFTMAX = 138,
    /** SpaceToBatchND operator. */
    NODE_TYPE_SPACE_TO_BATCH_ND = 141,
    /** Split operator. */
    NODE_TYPE_SPLIT = 145,
    /** Sqrt operator. */
    NODE_TYPE_SQRT = 146,
    /** Squeeze operator. */
    NODE_TYPE_SQUEEZE = 147,
    /** SquaredDifference operator. */
    NODE_TYPE_SQUARED_DIFFERENCE = 149,
    /** Stack operator. */
    NODE_TYPE_STACK = 150,
    /** StridedSlice operator. */
    NODE_TYPE_STRIDED_SLICE = 151,
    /** Sub operator. */
    NODE_TYPE_SUB_FUSION = 152,
    /** Tile operator. */
    NODE_TYPE_TILE_FUSION = 160,
    /** TopK operator. */
    NODE_TYPE_TOPK_FUSION = 161,
    /** Transpose operator. */
    NODE_TYPE_TRANSPOSE = 162,
    /** Unsqueeze operator. */
    NODE_TYPE_UNSQUEEZE = 165,
};

/**
 * @brief Enumerates the resize methods. It must be used together with the {@link Resize} operator.
 *
 * @since 3.2
 * @version 2.0
 */
enum ResizeMethod : byte {
    /** Unknown. This is the default value. */
    RESIZE_METHOD_UNKNOWN = -1,
    /** Bilinear interpolation.
     * For example, calculate the value of an unknown function <b>f</b> at point \f$ (x,y) \f$, where\n
     * \f$ x_1< x < x_2, y_1< y < y_2 \f$.
     * The four nearest coordinate points are \f$ Q_{11} = (x_1, y_1) \f$, \f$ Q_{12} = (x_1, y_2) \f$,\n
     * \f$ Q_{21} = (x_2, y_1) \f$, and \f$ Q_{22} = (x_2, y_2) \f$.
     * \f$ f(Q_{11}) \f$, \f$ f(Q_{12}) \f$, \f$ f(Q_{21}) \f$, and \f$ f(Q_{22}) \f$ represent the values of\n
     * the four points. The value of \f$ f(x,y) \f$ can be calculated by using the following formulas:
      \f[
         f(x,y_1) = \frac{x_2-x}{x_2-x_1}f(Q_{11})+\frac{x-x_1}{x_2-x_1}f(Q_{21})
      \f]

      \f[
         f(x,y_2) = \frac{x_2-x}{x_2-x_1}f(Q_{12})+\frac{x-x_1}{x_2-x_1}f(Q_{22})
      \f]

      \f[
         f(x,y) = \frac{y_2-y}{y_2-y_1}f(x,y_1)+\frac{y-y_1}{y_2-y_1}f(x,y_2)
      \f]
     */
    RESIZE_METHOD_LINEAR = 0,
    /** Nearest neighbor interpolation.
     * For example, calculate the value of an unknown function <b>f</b> at point \f$ (x,y) \f$, where\n
     * \f$ x_1< x < x_2, y_1< y < y_2 \f$.
     * The four nearest coordinate points are \f$ Q_{11} = (x_1, y_1) \f$, \f$ Q_{12} = (x_1, y_2) \f$,\n
     * \f$ Q_{21} = (x_2, y_1) \f$, and \f$ Q_{22} = (x_2, y_2) \f$.
     * Then, the value of the point closest to \f$ (x,y) \f$ is used as the value of \f$ f(x,y) \f$.
     */
    RESIZE_METHOD_NEAREST = 1,
    /** Bicubic interpolation.
     * Bicubic interpolation obtains the value of a sampling point by calculating the weighted average of the\n
     * values of 16 points around the sampling point. This parameter must be used together with <b>cubicCoeff</b>\n
     * and <b>coordinateTransformMode</b> of {@link Resize}.
     * When coordinateTransformMode==COORDINATE_TRANSFORM_MODE_HALF_PIXEL, <b>cubicCoeff</b> is <b>-0.5</b>.\n
     * In other cases, <b>cubicCoeff</b> is <b>-0.75</b>. The weight function of the interpolation is as follows:
      \f[
         W(x) =
         \begin{cases}
            (cubicCoeff+2)|x|^3 - (cubicCoeff+3)|x|^2 +1 , &\text{if } |x| \leq 1; \cr
            cubicCoeff|x|^3 - 5cubicCoeff|x|^2 + 8cubicCoeff|x| - 4cubicCoeff, &\text{if } 1 \lt |x| \leq 2; \cr
            0, &\text{otherwise.}
        \end{cases}
      \f]
     */
    RESIZE_METHOD_CUBIC = 2
};
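
/*
 * Example (non-normative): the bilinear interpolation described for
 * RESIZE_METHOD_LINEAR, written out as a small C++ helper. The function name
 * and parameter layout are illustrative.
 *
 * @code{.cpp}
 * // Values f11..f22 belong to the corner points Q11=(x1,y1), Q12=(x1,y2),
 * // Q21=(x2,y1), and Q22=(x2,y2); (x,y) lies inside that rectangle.
 * double Bilinear(double x, double y,
 *                 double x1, double x2, double y1, double y2,
 *                 double f11, double f12, double f21, double f22)
 * {
 *     double fxy1 = (x2 - x) / (x2 - x1) * f11 + (x - x1) / (x2 - x1) * f21;
 *     double fxy2 = (x2 - x) / (x2 - x1) * f12 + (x - x1) / (x2 - x1) * f22;
 *     return (y2 - y) / (y2 - y1) * fxy1 + (y - y1) / (y2 - y1) * fxy2;
 * }
 * @endcode
 */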

/**
 * @brief Enumerates the coordinate transformation modes. Only the {@link Resize} operator uses this parameter.
 * For example, the width coordinates are transformed, where:
 * <b>new_i</b> is the ith coordinate of the resized tensor along the x axis.
 * <b>old_i</b> is the corresponding coordinate of the input tensor along the x axis.
 * <b>newWidth</b> is the length of the resized tensor along the x axis.
 * <b>oldWidth</b> is the length of the input tensor along the x axis.
 * <b>old_i</b> can be calculated by using the following formula:
 *
 * COORDINATE_TRANSFORM_MODE_ASYMMETRIC: \f$ old_i = newWidth != 0 ? new_i * oldWidth / newWidth : 0 \f$ <br>
 * COORDINATE_TRANSFORM_MODE_ALIGN_CORNERS: \f$ old_i = newWidth != 1 ? new_i * (oldWidth - 1) / (newWidth - 1) : 0 \f$<br>
 * COORDINATE_TRANSFORM_MODE_HALF_PIXEL: \f$ old_i = newWidth > 1 ? (new_i + 0.5) * oldWidth / newWidth - 0.5 : 0 \f$<br>
 *
 * @since 3.2
 * @version 2.0
 */
enum CoordinateTransformMode : byte {
    /** Scale based on the ratio without alignment. */
    COORDINATE_TRANSFORM_MODE_ASYMMETRIC = 0,
    /** Align the four corners of the image. */
    COORDINATE_TRANSFORM_MODE_ALIGN_CORNERS = 1,
    /** Align with the pixel center. */
    COORDINATE_TRANSFORM_MODE_HALF_PIXEL = 2
};
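
/*
 * Example (non-normative): the three coordinate transformations above,
 * collected into a single C++ helper. The function name is illustrative.
 *
 * @code{.cpp}
 * double OldCoordinate(CoordinateTransformMode mode, double newI,
 *                      double oldWidth, double newWidth)
 * {
 *     switch (mode) {
 *         case COORDINATE_TRANSFORM_MODE_ASYMMETRIC:
 *             return newWidth != 0 ? newI * oldWidth / newWidth : 0;
 *         case COORDINATE_TRANSFORM_MODE_ALIGN_CORNERS:
 *             return newWidth != 1 ? newI * (oldWidth - 1) / (newWidth - 1) : 0;
 *         case COORDINATE_TRANSFORM_MODE_HALF_PIXEL:
 *             return newWidth > 1 ? (newI + 0.5) * oldWidth / newWidth - 0.5 : 0;
 *         default:
 *             return 0;
 *     }
 * }
 * @endcode
 */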

/**
 * @brief Enumerates the nearest neighbor interpolation types. It must be used together with the
 * {@link Resize} operator.
 *
 * @since 3.2
 * @version 2.0
 */
enum NearestMode : byte {
    /** Round to the nearest integer. */
    NEAREST_MODE_NORMAL = 0,
    /** Round half toward negative infinity. For example, 23.5 is rounded to 23, and −23.5 is rounded to −24. */
    NEAREST_MODE_ROUND_HALF_DOWN = 1,
    /** Round half toward positive infinity. For example, 23.5 is rounded to 24, and −23.5 is rounded to −23. */
    NEAREST_MODE_ROUND_HALF_UP = 2,
    /** Round down to the nearest integer. For example, 23.5 is rounded down to 23, and −23.5 is rounded down
      * to −24.
      */
    NEAREST_MODE_FLOOR = 3,
    /** Round up to the nearest integer. For example, 23.5 is rounded up to 24, and −23.5 is rounded up to −23. */
    NEAREST_MODE_CEIL = 4
};
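
/*
 * Example (non-normative): the rounding behavior of the last four modes,
 * expressed with standard C++ <cmath> functions. NEAREST_MODE_NORMAL is left
 * out because "round to the nearest integer" does not pin down a
 * tie-breaking rule.
 *
 * @code{.cpp}
 * #include <cmath>
 *
 * double RoundHalfDown(double x) { return std::ceil(x - 0.5); }   // 23.5 -> 23, -23.5 -> -24
 * double RoundHalfUp(double x)   { return std::floor(x + 0.5); }  // 23.5 -> 24, -23.5 -> -23
 * double RoundFloor(double x)    { return std::floor(x); }        // 23.5 -> 23, -23.5 -> -24
 * double RoundCeil(double x)     { return std::ceil(x); }         // 23.5 -> 24, -23.5 -> -23
 * @endcode
 */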

/**
 * @brief Enumerates the activation function types. Activation functions introduce nonlinearity to neural networks,\n
 * which allows neural network models to fit nonlinear functions.
 * If an operator in the {@link NodeAttrTypes.idl} file has <b>ActivationType</b> parameters,\n
 * the corresponding activation function will be called after the operator calculation is complete.
 *
 * @since 3.2
 * @version 2.0
 */
enum ActivationType : byte {
    /** No activation function. */
    ACTIVATION_TYPE_NO_ACTIVATION = 0,
    /**
     * ReLU activation function.
     * ReLU calculates \f$ max(x_i, 0) \f$ element by element. It outputs the value directly if it is positive;\n
     * otherwise, it outputs <b>0</b>.
     \f[
        \text{ReLU}(x_i) = (x_i)^+ = \max(x_i, 0)
     \f]
     * <b>\f$ x_i \f$</b> is the input element.
     */
    ACTIVATION_TYPE_RELU = 1,
    /**
     * Sigmoid activation function.
     * Execute the sigmoid activation function element-wise.
     * The sigmoid function is defined as follows:
     \f[
        \text{Sigmoid}(x_i) = \frac{1}{1 + \exp(-x_i)}
     \f]
     * <b>\f$ x_i \f$</b> is the input element.
     */
    ACTIVATION_TYPE_SIGMOID = 2,
    /**
     * ReLU6 activation function.
     * ReLU6 is similar to ReLU. The difference is that ReLU6 has an upper limit of <b>6</b>. If the input is\n
     * greater than 6, the output is limited to <b>6</b>.
     * The ReLU6 function is defined as follows:
     \f[
        \text{ReLU6}(x_i) = \min(\max(0, x_i), 6)
     \f]
     * <b>\f$ x_i \f$</b> is the input element.
     */
    ACTIVATION_TYPE_RELU6 = 3,
    /**
     * Exponential Linear Unit (ELU) activation function.
     * ELU calculates the ELU for each input element.
     * The ELU function is defined as follows:
     \f[
        ELU(x_{i}) =
        \begin{cases}
        x_i, &\text{if } x_i \geq 0; \cr
        \alpha * (\exp(x_i) - 1), &\text{otherwise.}
        \end{cases}
     \f]
     * <b>\f$ x_i \f$</b> indicates the input element, and <b>\f$ \alpha \f$</b> indicates the alpha parameter,\n
     * which is set by {@link Activation}.
     */
    ACTIVATION_TYPE_ELU = 4,
    /**
     * LeakyReLU activation function.
     * The LeakyReLU function is defined as follows:
     \f[
        \text{LeakyReLU}(x_i) =
        \begin{cases}
            x_i, &\text{if } x_i \geq 0; \cr
            {\alpha} * x_i, &\text{otherwise.}
        \end{cases}
     \f]
     * <b>\f$ x_i \f$</b> indicates the input element, and <b>\f$ \alpha \f$</b> indicates the alpha parameter,\n
     * which is set by {@link Activation}.
     */
    ACTIVATION_TYPE_LEAKY_RELU = 5,
    /**
     * Activation function for calculating the absolute value.
     * The function is defined as follows:
     \f[
        \text{abs}(x_i) = |x_i|
     \f]
     * <b>\f$ x_i \f$</b> is the input element.
     */
    ACTIVATION_TYPE_ABS = 6,
    /**
     * ReLU1 activation function.
     * The ReLU1 function is defined as follows:
     \f[
        \text{ReLU1}(x_i)= \min(\max(0, x_i), 1)
     \f]
     * <b>\f$ x_i \f$</b> is the input element.
     */
    ACTIVATION_TYPE_RELU1 = 7,
    /**
     * SoftSign activation function.
     * The SoftSign function is defined as follows:
     \f[
        \text{SoftSign}(x_i) = \frac{x_i}{1 + |x_i|}
     \f]
     * <b>\f$ x_i \f$</b> is the input.
     */
    ACTIVATION_TYPE_SOFTSIGN = 8,
    /**
     * Softplus activation function.
     * Softplus is a smooth approximation to ReLU. It can be used to constrain the output to always be positive.
     * The Softplus function is defined as follows:
     \f[
        \text{Softplus}(x_i) = \log(1 + \exp(x_i))
     \f]
     * <b>\f$ x_i \f$</b> is the input element.
     */
    ACTIVATION_TYPE_SOFTPLUS = 9,
    /**
     * Tanh activation function.
     * The Tanh function is defined as follows:
     \f[
        \text{Tanh}(x_i) = \frac{\exp(x_i) - \exp(-x_i)}{\exp(x_i) + \exp(-x_i)} = \frac{\exp(2x_i) - 1}{\exp(2x_i) + 1}
     \f]
     * <b>\f$ x_i \f$</b> is the input element.
     */
    ACTIVATION_TYPE_TANH = 10,
    /**
     * Scaled Exponential Linear Unit (SELU) activation function.
     * The SELU function is defined as follows:
     \f[
        SELU(x_{i}) =
        scale *
        \begin{cases}
        x_{i}, &\text{if } x_{i} \geq 0; \cr
        \alpha * (\exp(x_i) - 1), &\text{otherwise.}
        \end{cases}
     \f]
     * <b>\f$ x_i \f$</b> is the input element, and <b>\f$ \alpha \f$</b> and <b>\f$ scale \f$</b> are predefined\n
     * constants (\f$ \alpha = 1.67326324 \f$, \f$ scale = 1.05070098 \f$).
     */
    ACTIVATION_TYPE_SELU = 11,
    /**
     * Hard Swish activation function.
     *
     \f[
        \text{Hardswish}(x_{i}) = x_{i} * \frac{ReLU6(x_{i} + 3)}{6}
     \f]
     * <b>\f$ x_i \f$</b> is the input element.
     */
    ACTIVATION_TYPE_HSWISH = 12,
    /**
     * Hard sigmoid activation function.
     * The hard sigmoid function is defined as follows:
     \f[
        \text{Hardsigmoid}(x_{i}) = max(0, min(1, \frac{x_{i} + 3}{6}))
     \f]
     * <b>\f$ x_i \f$</b> is the input element.
     */
    ACTIVATION_TYPE_HSIGMOID = 13,
    /**
     * ThresholdedReLU activation function.
     * ThresholdedReLU is similar to ReLU. The <b>ThresholdedReLU</b> function is defined as follows:
     \f[
        \text{ThresholdedReLU}(x_i) = \min(\max(0, x_i), t)
     \f]
     * <b>\f$ x_i \f$</b> is the input element, and <b>\f$ t \f$</b> is the maximum value.
     */
    ACTIVATION_TYPE_THRESHOLDRELU = 14,
    /**
     * Linear activation function.
     * The Linear function is defined as follows:
     \f[
        \text{Linear}(x_i) = x_i
     \f]
     * <b>\f$ x_i \f$</b> is the input element.
     */
    ACTIVATION_TYPE_LINEAR = 15,
    /**
     * HardTanh activation function.
     * The HardTanh function is defined as follows:
     \f[
       \text{HardTanh}(x_i) =
       \begin{cases}
            \text{max\_val} & \text{ if } x_i > \text{ max\_val } \\
            \text{min\_val} & \text{ if } x_i < \text{ min\_val } \\
            x_i & \text{ otherwise } \\
        \end{cases}
     \f]
     * <b>\f$ x_i \f$</b> is the input, <b>\f$ max\_val \f$</b> is the maximum value, and <b>\f$ min\_val \f$</b>\n
     * is the minimum value. The two parameters are set by {@link Activation}.
     */
    ACTIVATION_TYPE_HARD_TANH = 16,
    /**
     * Sign activation function.
     * The Sign function is defined as follows:
     \f[
        \text{Sign}(x_i) =
        \begin{cases}
            -1, &\text{if } x_i < 0; \cr
            0, &\text{if } x_i = 0; \cr
            1, &\text{if } x_i > 0.
        \end{cases}
     \f]
     * <b>\f$ x_i \f$</b> is the input.
     */
    ACTIVATION_TYPE_SIGN = 17,
    /**
     * Swish activation function.
     * The Swish function is defined as follows:
     \f[
        \text{Swish}(x_i) = x_i * Sigmoid(x_i)
     \f]
     * <b>\f$ x_i \f$</b> is the input.
     */
    ACTIVATION_TYPE_SWISH = 18,
    /**
     * Gaussian Error Linear Unit (GELU) activation function.
     * The GELU function is defined as follows:
     \f[
        GELU(x_i) = x_i*P(X \leq x_i)
     \f]
     * <b>\f$ x_i \f$</b> is the input element, and <b>\f$ P \f$</b> is the cumulative distribution function of\n
     * the standard Gaussian distribution.
     * You need to use the <b>approximate</b> parameter of {@link Activation} to specify whether to use approximation.
     */
    ACTIVATION_TYPE_GELU = 19,
    /** Unknown. */
    ACTIVATION_TYPE_UNKNOWN = 20
};
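
/*
 * Example (non-normative): a few of the activation functions above, computed
 * element by element in C++. The function names are illustrative; alpha
 * corresponds to the parameter set by the Activation operator.
 *
 * @code{.cpp}
 * #include <algorithm>
 * #include <cmath>
 *
 * float Relu(float x)                    { return std::max(x, 0.0f); }
 * float Relu6(float x)                   { return std::min(std::max(x, 0.0f), 6.0f); }
 * float Sigmoid(float x)                 { return 1.0f / (1.0f + std::exp(-x)); }
 * float LeakyRelu(float x, float alpha)  { return x >= 0.0f ? x : alpha * x; }
 * float HSwish(float x)                  { return x * Relu6(x + 3.0f) / 6.0f; }
 * @endcode
 */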

/**
 * @brief Enumerates the modes for removing dimensions. It must be used together with the {@link ReduceFusion} operator.
 *
 * @since 3.2
 * @version 2.0
 */
enum ReduceMode : byte {
    /** Reduce a dimension to the average value of its elements. */
    REDUCE_MODE_MEAN = 0,
    /** Reduce a dimension to the maximum value of its elements. */
    REDUCE_MODE_MAX = 1,
    /** Reduce a dimension to the minimum value of its elements. */
    REDUCE_MODE_MIN = 2,
    /** Reduce a dimension to the product of its elements. */
    REDUCE_MODE_PROD = 3,
    /** Reduce a dimension to the sum of its elements. */
    REDUCE_MODE_SUM = 4,
    /** Reduce a dimension to the sum of the squares of its elements. */
    REDUCE_MODE_SUM_SQUARE = 5,
    /** Reduce a dimension to the sum of the absolute values of its elements. */
    REDUCE_MODE_ASUM = 6,
    /** Reduce a dimension to the logical AND of its elements. */
    REDUCE_MODE_ALL = 7
};
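
/*
 * Example (non-normative): what REDUCE_MODE_MEAN does to one dimension,
 * sketched in C++ for a 2 x 3 matrix reduced along its second dimension.
 *
 * @code{.cpp}
 * #include <array>
 * #include <cstddef>
 *
 * // Input (2 x 3):        Reduced along dimension 1 with REDUCE_MODE_MEAN:
 * // [[1, 2, 3],       ->  [2,
 * //  [4, 5, 6]]            5]
 * std::array<double, 2> ReduceMean(const std::array<std::array<double, 3>, 2>& in)
 * {
 *     std::array<double, 2> out{};
 *     for (std::size_t i = 0; i < in.size(); ++i) {
 *         for (double v : in[i]) {
 *             out[i] += v;
 *         }
 *         out[i] /= in[i].size();
 *     }
 *     return out;
 * }
 * @endcode
 */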

/**
 * @brief Enumerates the element-wise calculation types. It must be used together with the\n
 * {@link Eltwise} operator.
 *
 * @since 3.2
 * @version 2.0
 */
enum EltwiseMode : byte {
    /** Product of the elements of two tensors. */
    ELTWISE_MODE_PROD = 0,
    /** Sum of the elements of two tensors. */
    ELTWISE_MODE_SUM = 1,
    /** Maximum value of the elements of two tensors. */
    ELTWISE_MODE_MAXIMUM = 2,
    /** Unknown type. */
    ELTWISE_MODE_UNKNOWN = 3
};

/**
 * @brief Enumerates the padding types. It must be used together with {@link AvgPoolFusion},\n
 * {@link Conv2DFusion}, and {@link MaxPoolFusion}.
 *
 * @since 3.2
 * @version 2.0
 */
enum PadMode : byte {
    /**
     * Adds 0s in the input height and width directions.
     * If this mode is used, the padding parameter of the operator must be greater than or equal to 0.
     */
    PAD_MODE_PAD = 0,
    /**
     * The output height and width are obtained by dividing the input height and width by the stride and\n
     * rounding the quotient up to an integer.
     * If this mode is used, the padding parameter of the operator must be <b>0</b>.
     */
    PAD_MODE_SAME = 1,
    /**
     * Return the output of a valid calculation without padding. Pixels that do not meet the calculation\n
     * requirements will be discarded.
     * If this mode is used, the padding parameter of the operator must be <b>0</b>.
     */
    PAD_MODE_VALID = 2,
};
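
/*
 * Example (non-normative): output sizes for PAD_MODE_SAME and PAD_MODE_VALID,
 * assuming the usual convolution/pooling conventions. These formulas are a
 * common interpretation of the modes, not taken verbatim from this file.
 *
 * @code{.cpp}
 * // SAME: the input is padded so that output = ceil(input / stride).
 * int SameOutputSize(int input, int stride)
 * {
 *     return (input + stride - 1) / stride;   // integer ceiling division
 * }
 *
 * // VALID: no padding; windows that do not fit are discarded.
 * int ValidOutputSize(int input, int kernel, int stride)
 * {
 *     return (input - kernel) / stride + 1;   // integer floor division
 * }
 * @endcode
 */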

/**
 * @brief Enumerates the algorithms for rounding off decimals. It must be used together with the\n
 * {@link AvgPoolFusion} operator.
 *
 * @since 3.2
 * @version 2.0
 */
enum RoundMode : byte {
    /** Round down to the nearest integer. For example, 23.5 is rounded down to 23, and −23.5 is rounded down to −24. */
    ROUND_MODE_FLOOR = 0,
    /** Round up to the nearest integer. For example, 23.5 is rounded up to 24, and −23.5 is rounded up to −23. */
    ROUND_MODE_CEIL = 1
};

/**
 * @brief Enumerates the padding modes. It must be used together with the {@link PadFusion} operator.
 *
 * When <b>x</b> is \f$[[1,2,3],[4,5,6],[7,8,9]]\f$ and <b>paddings</b> is \f$[[2,2], [2,2]] \f$,\n
 * the effect is as follows: <br>
 * If paddingMode==PADDING_MODE_CONSTANT and constantValue = 0, the output is as follows:
 *
      \f$[[0. 0. 0. 0. 0. 0. 0.],\\
          [0. 0. 0. 0. 0. 0. 0.],\\
          [0. 0. 1. 2. 3. 0. 0.],\\
          [0. 0. 4. 5. 6. 0. 0.],\\
          [0. 0. 7. 8. 9. 0. 0.],\\
          [0. 0. 0. 0. 0. 0. 0.],\\
          [0. 0. 0. 0. 0. 0. 0.]]\\ \f$
 *
 * If paddingMode==PADDING_MODE_REFLECT, the output is as follows:
 *
      \f$[[9. 8. 7. 8. 9. 8. 7.],\\
          [6. 5. 4. 5. 6. 5. 4.],\\
          [3. 2. 1. 2. 3. 2. 1.],\\
          [6. 5. 4. 5. 6. 5. 4.],\\
          [9. 8. 7. 8. 9. 8. 7.],\\
          [6. 5. 4. 5. 6. 5. 4.],\\
          [3. 2. 1. 2. 3. 2. 1.]]\\ \f$
 *
 * If paddingMode==PADDING_MODE_SYMMETRIC, the output is as follows:
 *
      \f$[[5. 4. 4. 5. 6. 6. 5.],\\
          [2. 1. 1. 2. 3. 3. 2.],\\
          [2. 1. 1. 2. 3. 3. 2.],\\
          [5. 4. 4. 5. 6. 6. 5.],\\
          [8. 7. 7. 8. 9. 9. 8.],\\
          [8. 7. 7. 8. 9. 9. 8.],\\
          [5. 4. 4. 5. 6. 6. 5.]]\\ \f$
 *
 * @since 3.2
 * @version 2.0
 */
enum PaddingMode : byte {
    /** Constant (0 by default) padding. */
    PADDING_MODE_CONSTANT = 0,
    /** Reflection padding, which mirrors the input across its border without repeating the edge values. */
    PADDING_MODE_REFLECT = 1,
    /** Symmetric padding, which is similar to {@link PADDING_MODE_REFLECT} except that the edge values\n
      * themselves are also copied (the input is mirrored including its border).
      */
    PADDING_MODE_SYMMETRIC = 2,
    /** Reserved. */
    PADDING_MODE_RESERVED = 3
};

/**
 * @brief Dedicated error codes defined by NNRt. They are used as the return values of HDIs.
 *
 * @since 4.0
 * @version 2.0
 */
enum NNRT_ReturnCode : unsigned int {
    /** Success. */
    NNRT_SUCCESS = 0,
    /** Failed. */
    NNRT_FAILED = 1,
    /** Null pointer. */
    NNRT_NULL_PTR = 2,
    /** Invalid parameter. */
    NNRT_INVALID_PARAMETER = 3,
    /** Memory error. */
    NNRT_MEMORY_ERROR = 4,
    /** Insufficient memory. */
    NNRT_OUT_OF_MEMORY = 5,
    /** Forbidden operation. */
    NNRT_OPERATION_FORBIDDEN = 6,
    /** Invalid file. */
    NNRT_INVALID_FILE = 7,
    /** Invalid path. */
    NNRT_INVALID_PATH = 8,
    /** Insufficient buffer. */
    NNRT_INSUFFICIENT_BUFFER = 9,
    /** No change. */
    NNRT_NO_CHANGE = 10,
    /** Not supported. */
    NNRT_NOT_SUPPORT = 11,
    /** Service error. */
    NNRT_SERVICE_ERROR = 12,
    /** Device error. */
    NNRT_DEVICE_ERROR = 13,
    /** Device busy. */
    NNRT_DEVICE_BUSY = 14,
    /** Operation canceled. */
    NNRT_CANCELLED = 15,
    /** Access denied. */
    NNRT_PERMISSION_DENIED = 16,
    /** Timeout. */
    NNRT_TIME_OUT = 17,
    /** Invalid tensor. */
    NNRT_INVALID_TENSOR = 18,
    /** Invalid node. */
    NNRT_INVALID_NODE = 19,
    /** Invalid input. */
    NNRT_INVALID_INPUT = 20,
    /** Invalid output. */
    NNRT_INVALID_OUTPUT = 21,
    /** Invalid data type. */
    NNRT_INVALID_DATATYPE = 22,
    /** Invalid data layout. */
    NNRT_INVALID_FORMAT = 23,
    /** Invalid tensor name. */
    NNRT_INVALID_TENSOR_NAME = 24,
    /** Invalid shape. */
    NNRT_INVALID_SHAPE = 25,
    /** Dimension range exceeded. */
    NNRT_OUT_OF_DIMENTION_RANGES = 26,
    /** Invalid buffer. */
    NNRT_INVALID_BUFFER = 27,
    /** Invalid buffer size. */
    NNRT_INVALID_BUFFER_SIZE = 28,
    /** Invalid performance mode. */
    NNRT_INVALID_PERFORMANCE_MODE = 29,
    /** Invalid priority. */
    NNRT_INVALID_PRIORITY = 30,
    /** Invalid model. */
    NNRT_INVALID_MODEL = 31,
    /** Invalid model cache. */
    NNRT_INVALID_MODEL_CACHE = 32,
    /** Operator not supported. */
    NNRT_UNSUPPORTED_OP = 33
};
/** @} */