1/*
2 * Copyright (c) 2024 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 *     http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16/**
17 * @addtogroup NNRt
18 * @{
19 *
20 * @brief Provides a unified interface for AI chip drivers to access OpenHarmony.
21 * Neural Network Runtime (NNRt) is a cross-chip inference computing runtime environment oriented to the AI field.
22 *
23 * @since 3.2
24 * @version 2.1
25 */
26
27/**
28 * @file NodeAttrTypes.idl
29 *
30 * @brief Defines the parameters and functionality of AI model operators.
31 *
32 * All structures in this file declare only operator attributes and do not contain the interfaces for executing\n
33 * operator functions.
34 * - 1. The operators in the file are in one-to-one correspondence with a {@link NodeType}. In model inference,\n
35 * {@link NodeType} is stored in nodeType of {@link Node}.
36 * - 2. Each operator has at least one input and one output. The input is the tensor received by the operator,\n
37 * and the output is the tensor produced after the operator operation. The relationship between the input, operator,\n
38 * and output is determined by <b>inputIndex</b> and <b>outIndex</b> of the {@link Node} structure.
39 *
40 * @since 3.2
41 * @version 2.1
42 */
43
44/**
45 * @brief Defines the package path of the NNRt module.
46 *
47 * @since 3.2
48 * @version 2.1
49 */
50package ohos.hdi.nnrt.v2_1;
51
52import ohos.hdi.nnrt.v2_1.NnrtTypes;
53
54/**
55 * @brief Obtains the absolute value of the input tensor.
56 *
57 * The {@link NodeType} of this operator is <b>NODE_TYPE_ABS</b>.
58 *
59 * Input:
60 *
61 * * <b>x</b>, an n-dimensional tensor.
62 *
63 * Output:
64 *
65 * * The absolute value of the input tensor.
66 *
67 * @since 5.0
68 * @version 2.1
69 */
70struct Abs
71{
72};
73
74/**
75 * @brief Defines the operator of the activation type. All activation functions belong to this operator.\n
76 * The specific activation function type is determined by the parameters.
77 *
78 * The {@link NodeType} of this operator is <b>NODE_TYPE_ACTIVATION</b>.
79 *
80 * Input:
81 *
82 * * <b>x</b>, an n-dimensional tensor.
83 *
84 * Output:
85 *
86 * * A tensor returned after the activation function is executed.
87 *
88 * @since 3.2
89 * @version 2.1
90 */
91struct Activation
92{
93    /** Activation function type. */
94    enum ActivationType activationType;
95    /** Size factor, used for the <b>LeakyReLU</b> and <b>ELU</b> activation functions. */
96    float alpha;
97    /** Minimum value, used for the <b>HardTanh</b> activation function. */
98    float minVal;
99    /** Maximum value, used for the <b>HardTanh</b> activation function. */
100    float maxVal;
101    /** Whether to use the approximation algorithm. It is used for the <b>GRLU</b> activation function. */
102    boolean approximate;
103};
104
105/**
106 * @brief Adds tensors. The output shape is the same as the input one after broadcasting, and the data type\n
107 * is the one with higher precision of the two inputs.
108 *
109 * The {@link NodeType} of this operator is <b>NODE_TYPE_ADD_FUSION</b>.
110 *
111 * Input:
112 *
113 * * <b>x</b>, the first input tensor.
114 * * <b>y</b>, the second input tensor. The data type must be the same as that of the first tensor.
115 *
116 * * Output:
117 *
118 * * Sum of the elements of <b>x</b> and <b>y</b>. The data shape is the same as the one after broadcasting,\n
119 *   and the data type is the one with higher precision of the two inputs.
120 *   If <b>activationType</b> is configured, the specified activation function will be called before
121 *   the output is returned.
122 *
123 * @since 3.2
124 * @version 2.1
125 */
126struct AddFusion
127{
128    /** Activation function type. For details, see {@link ActivationType}. */
129    enum ActivationType activationType;
130};
131
132/**
133 * @brief Determine whether all emements in a given tensor are non-zero. It returns a boolean tensor\n
134 * where each element is 'True' if corresponding element in the input tensor is non-zero, and 'False' otherwise.
135 *
136 * The {@link NodeType} of this operator is <b>NODE_TYPE_ALL</b>.
137 *
138 * Input:
139 *
140 * * <b>x</b>, a N-dimension tensor of shape <b>(N,*)</b>, where * indicates any number of additional dimensions.
141 * * <b>axis</b>, scalar or tensor, indices the dimension to be computed.
142 *
143 * Output:
144 *
145 * * 1-dimension or n-dimension tensor with boolean data type.
146 *
147 * @since 5.0
148 * @version 2.1
149 */
150struct All
151{
152    /** Whether to keep dimension info. */
153    long keepDims;
154};
155
156/**
157 * @brief Obtains the first K indices or values of a cross-axis tensor.
158 *
159 * The {@link NodeType} of this operator is <b>NODE_TYPE_ARGMAX_FUSION</b>.
160 *
161 * Input:
162 *
163 * * <b>x</b>, a tensor of shape <b>(N,*)</b>, where * indicates any number of additional dimensions.
164 *
165 * Output:
166 *
167 * * First <b>K</b> indices or values before the maximum input tensor on the axis.
168 *
169 * @since 3.2
170 * @version 2.1
171 */
172struct ArgMaxFusion
173{
174    /** Target axis where the maximum indices or values are obtained. */
175    long axis;
176    /** First <b>K</b> maximum values on the axis. */
177    long topK;
178    /** Whether to keep the output dimensions the same as the input dimensions. */
179    boolean keepDims;
180    /** Return the index if the value is <b>false</b>. Return the value if the value is <b>true</b>.\n
181      * The default value is <b>false</b>.
182      */
183    boolean outMaxValue;
184};
185
186/**
187 * @brief Asserts that the given condition si true.
188 * If <b>condition</b> evalutes to false, print the list of tensors in data.
189 * Summerize determines how many entries of the tensors to print.
190 *
191 * The {@link NodeType} of this operator is <b>NODE_TYPE_ASSERT</b>.
192 *
193 * Input:
194 *
195 * * <b>condition</b>, the condition to evalute.
196 * * <b>data</b>, the tensors to print out when condition is false.
197 *
198 * Output:
199 *
200 * * Result value judged by condition. If the condition is not true, an Error is returned.
201 *
202 * @since 5.0
203 * @version 2.1
204 */
205struct Assert
206{
207    /** The number of entries for each tensor is printed. */
208    long summarize;
209};
210
211/**
212 * @brief Applies a 2D average pooling on the input tensor. The int8 quantization input is supported.
213 *
214 * The {@link NodeType} of this operator is <b>NODE_TYPE_AVGPOOL_FUSION</b>.
215 *
216 * When padMode==PAD_MODE_PAD, <b>padList</b> must be greater than or equal to <b>0</b>.\n
217 * In other cases, <b>padding</b> must be <b>0</b>.
218 *
219 * Input:
220 *
221 * * <b>x</b>, an n-dimensional tensor.
222 *
223 * Output:
224 *
225 * * Tensor after average pooling.
226 *
227 * @since 3.2
228 * @version 2.1
229 */
230struct AvgPoolFusion
231{
232    /**
233     * Kernel size used to obtain the average value. It is an int array in the format of [kernel_height, kernel_weight]\n
234     * with length of 2.
235     * The first number indicates the kernel height, and the second number indicates the kernel width.
236     */
237    long[] kernelSize;
238    /**
239     * Distance of kernel moving. The value is an int array [stride_height, stride_weight] with length of 2.\n
240     * The first number indicates the moving size in height, and the second number indicates the moving size in width.
241     */
242    long[] strides;
243    /**
244     * <b>x</b> is padded with an int array [top, bottom, left, right] with length of 4, and the nearest neighbor values\n
245     * are used for padding.
246     */
247    long[] pad;
248    /** Padding method. */
249    enum PadMode padMode;
250    /** Numerical operation mode of the output tensor. */
251    enum RoundMode roundMode;
252    /** Format of the data during calculation. For details, see {@link Format}. */
253    enum Format format;
254    /** Whether to do global pooling. */
255    boolean global;
256    /** Activation function type. For details, see {@link ActivationType}. */
257    enum ActivationType activationType;
258};
259
260/**
261 * @brief Divides the batch dimension of a 4D tensor into small blocks by <b>blockShape</b>, and interleaves these blocks\n
262 * back into the spatial dimension.
263 *
264 * The {@link NodeType} of this operator is <b>NODE_TYPE_BATCH_TO_SPACE_ND</b>.
265 *
266 * Input:
267 *
268 * * <b>x</b>, an n-dimensional tensor.
269 *
270 * Output:
271 *
272 * * Output tensor. Assume that the shape of <b>x</b> is (n,h,w,c) and the shape of output is (n',h',w',c'):
273 * \f$ n' = n / (blockShape[0] * blockShape[1])\f$<br>
274 * \f$ h' = h * blockShape[0] - crops[0][0] - crops[0][1] \f$<br>
275 * \f$ w' = w * blockShape[1] - crops[1][0] - crops[1][1] \f$<br>
276 * \f$ c'= c \f$
277 *
278 * @since 3.2
279 * @version 2.1
280 */
281struct BatchToSpaceND
282{
283    /** Block size, which is an array [height_block, weight_block] with length of 2. */
284    long[] blockShape;
285    /**
286     * Crop values for the spatial dimension.
287     * It is a 2D array [crop0_start, crop0_end], [crop1_start, crop1_end] with the shape of (2, 2).
288     */
289    long[][] crops;
290};
291
292/**
293 * @brief Offsets the data in each dimension of the input tensor.
294 *
295 * The {@link NodeType} of this operator is <b>NODE_TYPE_BIAS_ADD</b>.
296 *
297 * Input:
298 *
299 * * <b>x</b>, an n-dimensional tensor.
300 * * <b>bias</b>, the bias tensor.
301 *
302 * Output:
303 *
304 * * Output tensor, which is the sum of the input tensor and the bias in each dimension.
305 *
306 * @since 3.2
307 * @version 2.1
308 */
309struct BiasAdd
310{
311};
312
313/**
314 * @brief Broadcast a tensor for a compatiable shape.
315 *
316 * The {@link NodeType} of this operator is <b>NODE_TYPE_BROADCAST_TO</b>.
317 *
318 * Input:
319 *
320 * * <b>x</b>, an n-dimensional tensor to broadcast.
321 *
322 * Output:
323 *
324 * * Output tensor after broadcasted.
325 *
326 * @since 5.0
327 * @version 2.1
328 */
329struct BroadcastTo
330{
331    /** An 1-dimensional Tensor, the shape of the desired output. */
332    long[] shape;
333};
334
335/**
336 * @brief Converts the tensor data type.
337 *
338 * The {@link NodeType} of this operator is <b>NODE_TYPE_CAST</b>.
339 *
340 * Input:
341 *
342 * * <b>x</b>, an n-dimensional tensor.
343 * * <b>type</b>, the target type of the data.
344 *
345 * Output:
346 *
347 * * A tensor with the specified data type.
348 *
349 * @since 3.2
350 * @version 2.1
351 */
352struct Cast
353{
354};
355
356/**
357 * @brief Returns element-wise smallest integer in not less than x.
358 *
359 * The {@link NodeType} of this operator is <b>NODE_TYPE_CEIL</b>.
360 *
361 * Input:
362 *
363 * * <b>x</b>, an n-dimensional tensor.
364 *
365 * Output:
366 *
367 * * Output tensor after ceiled.
368 *
369 * @since 5.0
370 * @version 2.1
371 */
372struct Ceil
373{
374};
375
376/**
377 * @brief Returns a tensor of the same type and shape as input tensor with its value clipped to min and max.
378 * Any values less than <b>min</b> are set to <b>min</b>. Any values greater than <b>max</b> are set to <b>max</b>.
379 *
380 * The {@link NodeType} of this operator is <b>NODE_TYPE_CLIP</b>.
381 *
382 * Input:
383 *
384 * * <b>x</b>, an n-dimensional tensor to clip.
385 *
386 * Output:
387 *
388 * * Output tensor after clipped.
389 *
390 * @since 5.0
391 * @version 2.1
392 */
393struct Clip
394{
395    /** Maximum value, above which element is replaced by max. It must be a scalar(tensor of empty shape). */
396    float max;
397    /** Minimum value, under which element is replaced by min. It must be a scalar(tensor of empty shape). */
398    float min;
399};
400
401/**
402 * @brief Connects tensors in the specified axis or connects input tensors along with the given axis.
403 *
404 * The {@link NodeType} of this operator is <b>NODE_TYPE_CONCAT</b>.
405 *
406 * Input:
407 *
408 * * Tensors with the same dimension.
409 *
410 * Output:
411 *
412 * * Result of the tensors connected.
413 *
414 * @since 3.2
415 * @version 2.1
416 */
417struct Concat
418{
419    /** Axis specified. The value must be less than the number of dimensions of the input tensor. */
420    long axis;
421};
422
423/**
424 * @brief Calculates the 2D convolution on a 4D tensor.
425 *
426 * The {@link NodeType} of this operator is <b>NODE_TYPE_CONV2D_FUSION</b>.
427 *
428 * When padMode==PAD_MODE_PAD, <b>padList</b> must be greater than or equal to <b>0</b>.
429 * In other cases, <b>padding</b> must be <b>0</b>.
430 *
431 * Input:
432 *
433 * * <b>x</b>, a 4D tensor in NHWC format.
434 * * <b>weight</b>, a convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
435 *   The value of <b>inChannel</b> divided by <b>group</b>must be an integer.
436 * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.\n
437 *   In the quantization scenario, <b>bias</b> does not need to be quantized.
438 *   If quantization is required, the input data must be of the int32 type.
439 *   The actual quantization parameter is determined by <b>x</b> and <b>weight</b>.
440 *
441 * Output:
442 *
443 * * Convolution output.
444 *
445 * @since 3.2
446 * @version 2.1
447 */
448struct Conv2DFusion
449{
450    /** Size (height and width) of the convolution kernel. */
451    long[] kernelSize;
452    /**
453     * Movement stride of the convolution kernel in height and weight.\n
454     * It is an int array [strideHeight, strideWidth] with length of 2.
455     */
456    long[] stride;
457    /**
458     * Dilation size of the convolution kernel in height and weight. It is an int array in the format of\n
459     * [dilationHeight, dilationWidth].
460     * The value must be greater than or equal to <b>1</b> and cannot exceed the height and width of <b>x</b>.
461     */
462    long[] dilation;
463    /** Padding mode. For details, see {@link PadMode}. */
464    enum PadMode padMode;
465    /**
466     * Padding around <b>x</b> in the height and width directions. It is an int array [top, bottom, left, right]\n
467     * with length of 4.
468     */
469    long[] padList;
470    /**
471     * Splits <b>x</b> into groups by <b>inChannel</b>. The <b>group</b> value is of the int type.
472     * If <b>group</b> is <b>1</b>, it is a conventional convolution.
473     * If <b>group</b> is <b>inChannel</b>, it is depthwiseConv2d. In this case, group==inChannel==outChannel.
474     * If <b>group</b> is greater than <b>1</b> and less than <b>inChannel</b>, it is group convolution.\n
475     * In this case, outChannel==group.
476     */
477    long group;
478    /** Number of input channels. */
479    long inChannel;
480    /** Number of output channels. */
481    long outChannel;
482    /** Activation function type. For details, see {@link ActivationType}. */
483    enum ActivationType activationType;
484};
485
486/**
487 * @brief Calculates a 2D transposed convolution on a 4D tensor.
488 *
489 * The {@link NodeType} of this operator is <b>NODE_TYPE_CONV2D_TRANSPOSE_FUSION</b>.
490 *
491 * When padMode==PAD_MODE_PAD, <b>padList</b> must be greater than or equal to <b>0</b>.\n
492 * In other cases, <b>padding</b> must be <b>0</b>.
493 *
494 * Input:
495 *
496 * * <b>x</b>, a 4D tensor in NHWC format.
497 * * <b>weight</b>, a convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
498 *   The value of <b>inChannel</b> divided by <b>group</b>must be an integer.
499 * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.\n
500 *   In the quantization scenario, <b>bias</b> does not need to be quantized.
501 *   If quantization is required, the input data must be of the int32 type.
502 *   The actual quantization parameter is determined by <b>x</b> and <b>weight</b>.
503 *
504 * Output:
505 *
506 * * An n-dimensional tensor.
507 *
508 * @since 3.2
509 * @version 2.1
510 */
511struct Conv2dTransposeFusion
512{
513    /** Size (height and width) of the convolution kernel. */
514    long[] kernelSize;
515    /** Movement stride of the convolution kernel in height and weight. It is an int array [strideHeight, strideWidth]\n
516      * with length of 2.
517      */
518    long[] stride;
519    /** Dilation size of the convolution kernel in height and weight. It is an int array [dilationHeight, dilationWidth]\n
520      * with length of 2.
521      * The value must be greater than or equal to <b>1</b> and cannot exceed the height and width of <b>x</b>.
522      */
523    long[] dilation;
524    /** Padding mode. For details, see {@link PadMode}. */
525    enum PadMode padMode;
526    /** Padding around the input <b>x</b> in the height and width directions. It is an int array [top, bottom, left, right]\n
527      * with length of 4. */
528    long[] padList;
529    /**
530     * Splits <b>x</b> into groups by <b>inChannel</b>.
531     * If <b>group</b> is <b>1</b>, it is a conventional convolution.
532     * If group is greater than <b>1</b> and less than or equal to <b>inChannel</b>, this is a group convolution.
533     */
534    long group;
535    /** Number of input channels. */
536    long inChannel;
537    /** Number of output channels. */
538    long outChannel;
539    /** Activation function type. For details, see {@link ActivationType}. */
540    enum ActivationType activationType;
541    /**
542     * A list of integer array with two elements, specifying the paddings along the height and width of the output tensor.
543     */
544    long[] outputPaddings;
545};
546
547/**
548 * @brief Calculates the cosine of the given input tensor, element-wise.
549 *
550 * The {@link NodeType} of this operator is <b>NODE_TYPE_COS</b>.
551 *
552 * Input:
553 *
554 * * <b>x</b>, an n-dimensional tensor.
555 *
556 * Output:
557 *
558 * * The cosine of the input tensor computed element-wise.
559 *
560 * @since 5.0
561 * @version 2.1
562 */
563struct Cos
564{
565};
566
567/**
568 * @brief Generate a tensor with given value and shape.
569 *
570 * The {@link NodeType} of this operator is <b>NODE_TYPE_CONSTANT_OF_SHAPE</b>.
571 *
572 * Input:
573 *
574 * * <b>x</b>, an 1-dimensional tensor, indicates the shape of the expected output tensor.
575 * * If empty tensor is given, the output would be a scalar. All values must be >= 0.
576 *
577 * Output:
578 *
579 * * Output tensor of shape specified by <b>x</b>.
580 *
581 * @since 5.0
582 * @version 2.1
583 */
584struct ConstantOfShape
585{
586    /** The data type of output elements. */
587    long dataType;
588    /** The value of the output elements. Should be a one-element tensor. */
589    float[] value;
590};
591
592/**
593 * @brief Crop given tensor acrodding to axis and offset.
594 *
595 * The {@link NodeType} of this operator is <b>NODE_TYPE_CROP</b>.
596 *
597 * Input:
598 *
599 * * <b>x</b>, an n-dimensional tensor.
600 * * <b>shape</b>, an 1-dimensional tensor, indices cropped windows dimension.
601 *
602 * Output:
603 *
604 * * Cropped output tensor.
605 *
606 * @since 5.0
607 * @version 2.1
608 */
609struct Crop
610{
611    /** Cropped dimension. */
612    long axis;
613    /** Cropped offset per dimension. */
614    long[] offset;
615};
616
617/**
618 * @brief The output of the object detection model is post-processed, including decoding the bounding box,
619 * class probability and score of the model output, and then performing non-maximum suppression (NMS) to remove
620 * the overlapping bounding box, and finally outputting the detection result.
621 *
622 * The {@link NodeType} of this operator is <b>NODE_TYPE_DETECTION_POST_PROCESS</b>.
623 *
624 * Input:
625 *
626 * * <b>bbox</b>, bounding box for model output.
627 * * <b>scores</b>, probability of class score for model output.
628 * * <b>anchors</b>, The coordinates and size information of the candidate boxes used to generate the detection box.
629 *
630 * Output:
631 *
632 * * <b>bboxes</b>, 3-dimensional tensor with an inner array representing the coordinate values of the object
633 * *     detection box.
634 * * <b>classes</b>,2-dimensional tensor with an internal numeric value representing the classification index
635 * *     corresponding to each detection box.
636 * * <b>confidences</b>, 2-dimensional tensor with an internal numerical value indicating the confidence level
637 * *     of the detected object.
638 * * <b>numDetections</b>, 1-dimensional tensor, the number of test results.
639 *
640 * @since 5.0
641 * @version 2.1
642 */
643struct DetectionPostProcess
644{
645    /** Format of input tensor, default NCHW. */
646    enum Format format;
647    /** The size of the input tensor. */
648    long inputSize;
649    /** The scaling factor used to convert the output from the normalized form to the original image coordinates. */
650    float[] scale;
651    /** The threshold of overlapping region during NMS. */
652    float nmsIoUThreshold;
653    /** The socre threshold used to select target bbox duing NMS. */
654    float nmsScoreThreshold;
655    /** Maximum of bboxes per image. */
656    long maxDetections;
657    /** Maximum of bboxes per class. */
658    long detectionsPerClass;
659    /** Maximum of reserved classes per bboxes. */
660    long maxClassesPerDetection;
661    /** Number of target classes to be detected. */
662    long numClasses;
663    /** Whether use NMS based on IoU threshold. */
664    boolean useRegularNms;
665    /** Whether need to quantize. */
666    boolean outQuantized;
667};
668
669/**
670 * @brief DepthToSpace rearranges (permutes) data from depth into blocks of spatial data.
671 * This is the reverse transformation of SpaceToDepth. More specifically, this op outputsa copy of the input tensor\n
672 * where values from the depth dimension are moved in spatial blocks to the height and width dimensions.
673 * By default, mode = DCR. In the DCR mode, elements along the depth dimension from the input tensor are rearranged\n
674 * in the following order: depth, column, and then row.
675 *
676 * The {@link NodeType} of this operator is <b>NODE_TYPE_DEPTH_TO_SPACE</b>.
677 *
678 * Input:
679 *
680 * * <b>x</b>, input tensor of 4-dimensional, with specific format of NHWC or NCHW.
681 * * where N is the batch axis, H is the height, W is the width and C is the channel or depth.
682 *
683 * Output:
684 *
685 * * Output tensor of [N, H * blockSize, W * blockSize, C/(blockSize * blockSize)] for NHWC format\n
686 *   or [N, C/(blockSize * blockSize), H * blockSize, W * blockSize] for NCHW format.
687 *
688 * @since 5.0
689 * @version 2.1
690 */
691struct DepthToSpace
692{
693    /** Blocks of [blockSize, blockSize] are moved. */
694    long blockSize;
695    /** Format of input tensor, default NCHW. */
696    enum Format format;
697    /** DCR (default) for depth-column-row order re-arrangement. Use CRD for column-row-depth order. */
698    String mode;
699};
700
701/**
702 * @brief Divides the first tensor by the second tensor element-wise.
703 *
704 * The {@link NodeType} of this operator is <b>NODE_TYPE_DIV_FUSION</b>.
705 *
706 * Input:
707 *
708 * * <b>x1</b>, a tensor of the int or float type.
709 * * <b>x2</b>, a tensor of the int or float type.
710 *
711 * Output:
712 *
713 * * Quotient of the two inputs.
714 *
715 * @since 3.2
716 * @version 2.1
717 */
718struct DivFusion
719{
720    /** Activation function type. For details, see {@link ActivationType}. */
721    enum ActivationType activationType;
722};
723
724/**
725 * @brief Performs an element-wise operation.
726 *
727 * The {@link NodeType} of this operator is <b>NODE_TYPE_ELTWISE</b>.
728 *
729 * Input:
730 *
731 * * <b>x1</b>, the first input tensor.
732 * * <b>x2</b>, the second input tensor.
733 *
734 * Output:
735 *
736 * * A tensor with the same data type and shape as <b>x1</b>.
737 *
738 * @since 3.2
739 * @version 2.1
740 */
741struct Eltwise
742{
743    /** Element-wise operation type. For details, see {@link EltwiseMode}. */
744    enum EltwiseMode mode;
745};
746
747/**
748 * @brief Returns the tensor resulted from performing the equal logical operation elementwise\n
749 * on the input tensors <b>x1</b> and <b>x2</b>.
750 *
751 * The {@link NodeType} of this operator is <b>NODE_TYPE_EQUAL</b>.
752 *
753 * Input:
754 *
755 * * <b>x1</b>, the first input operand.
756 * * <b>x2</b>, the second input operand.
757 *
758 * Output:
759 *
760 * * Result tensor.
761 *
762 * @since 5.0
763 * @version 2.1
764 */
765struct Equal
766{
767};
768
769/**
770 * @brief Calculates the exponential of the given input tensor, element-wise.
771 * ExpFusion computes outputs y = base ^ (shift + scale * x), for base > 0.
772 * Or if base is set to the default (-1), base is set to e,
773 * so y = exp(shift + scale * x).
774 *
775 * The {@link NodeType} of this operator is <b>NODE_TYPE_EXPFUSION</b>.
776 *
777 * Input:
778 *
779 * * <b>x</b>, input tensor.
780 *
781 * Output:
782 *
783 * * The exponential of the input tensor computed element-wise.
784 *
785 * @since 5.0
786 * @version 2.1
787 */
788struct ExpFusion
789{
790    /** base of exponential function, default -1 for a value of <b>e</b>, must be > 0 */
791    float base;
792    /** amplifcation factor of independent value, default 1 */
793    float scale;
794    /** offset of independent value, default 1 */
795    float shift;
796};
797
798/**
799 * @brief Adds an additional dimension to a tensor at the given axis.
800 *
801 * The {@link NodeType} of this operator is <b>NODE_TYPE_EXPAND_DIMS</b>.
802 *
803 * Input:
804 *
805 * * <b>x</b>, an n-dimensional tensor.
806 * * <b>axis</b>: index of the dimension to be added. The value is of the int32_t type and must be a constant\n
807 *   in the range [-dim-1, dim].
808 *
809 * Output:
810 *
811 * * Operator with an additional dimension at the given axis.
812 *
813 * @since 3.2
814 * @version 2.1
815 */
816struct ExpandDims
817{
818};
819
820/**
821 * @brief Flattens the input tensor into a 2D matrix. If input tensor has shape (d_0, d_1, … d_n),
822 * then the output will have shape (d_0 X d_1 … d_(axis-1), d_axis X d_(axis+1) … X dn).
823 *
824 * The {@link NodeType} of this operator is <b>NODE_TYPE_FLATTEN</b>.
825 *
826 * Input:
827 *
828 * * <b>x</b>: A tensor of rank >= axis.
829 *
830 * Output:
831 *
832 * * A 2-dimension tensor with the contents of the input tensor, with input dimensions up to axis flattened to\n
833 *   the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.
834 *
835 * @since 5.0
836 * @version 2.1
837 */
838struct Flatten
839{
840    /** Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output.
841      * The value for axis must be in the range [-r, r], where r is the rank of the input tensor.
842      * Negative value means counting dimensions from the back. When axis = 0, the shape of the output tensor is\n
843      * (1, (d_0 X d_1 … d_n)), where the shape of the input tensor is (d_0, d_1, … d_n). */
844    long axis;
845};
846
847/**
848 * @brief Returns element-wise largest integer not greater than x.
849 *
850 * The {@link NodeType} of this operator is <b>NODE_TYPE_FLOOR</b>.
851 *
852 * Input:
853 *
854 * * <b>x</b>, an n-dimensional tensor.
855 *
856 * Output:
857 *
858 * * Output tensor after floored.
859 *
860 * @since 5.0
861 * @version 2.1
862 */
863struct Floor
864{
865};
866
867/**
868 * @brief Creates a tensor of the specified dimensions and fills it with a scalar.
869 *
870 * The {@link NodeType} of this operator is <b>NODE_TYPE_FILL</b>.
871 *
872 * Input:
873 *
874 * * <b>value</b>: scalar used to fill the tensor.
875 * * <b>shape</b>, which specifies the dimensions of the tensor to create.
876 *
877 * Output:
878 *
879 * * Tensor filled by the scaler.
880 *
881 * @since 3.2
882 * @version 2.1
883 */
884struct Fill
885{
886};
887
888/**
889 * @brief Applies full connection for the input data.
890 *
891 * The {@link NodeType} of this operator is <b>NODE_TYPE_FULL_CONNECTION</b>.
892 *
893 * When <b>useAxis</b> is <b>true</b>, <b>axis</b> must be set. When <b>useAxis</b> is <b>false</b>,\n
894 * <b>axis</b> is <b>0</b>.
895 *
896 * Input:
897 *
898 * * <b>x</b>, an n-dimensional tensor.
899 * * <b>weight</b>: weight tensor for a full connection.
900 * * <b>bias</b>, a full-connection bias. In quantization scenarios, a quantized parameter is not required.
901 * * If quantization is required, the data must be of the int32 type. The actual quantization parameter is\n
902 *   determined by <b>x</b> and <b>weight</b>.
903 *
904 * Output:
905 *
906 * * <b>output</b>: computed tensor.
907 *
908 * @since 3.2
909 * @version 2.1
910 */
911struct FullConnection
912{
913    /** Whether to use the bias. */
914    boolean hasBias;
915    /** Whether to use the axis. */
916    boolean useAxis;
917    /** Axis specified for the full connection. The specified axis and its following axes are converted into\n
918      * a 1D tensor and then apply the full connection.
919      */
920    long axis;
921    /** Activation function type. For details, see {@link ActivationType}. */
922    enum ActivationType activationType;
923};
924
925/**
926 * @brief Performs batch normalization for a tensor.
927 *
928 * The {@link NodeType} of this operator is <b>NODE_TYPE_FUSED_BATCH_NORM</b>.
929 *
930 * Input:
931 *
932 * * <b>x</b>: a tensor of shape [N, ..., C], that is, the nth dimension is the number of channels.
933 * * <b>scale</b>: 1D tensor of the scaling factor used to scale the first normalized tensor.
934 * * <b>offset</b>: 1D tensor used to move to the first normalized tensor.
935 * * <b>mean</b>: 1D tensor of the overall mean value. It is used only for inference. In case of training,\n
936 *   this parameter must be left empty.
937 * * <b>variance</b>: 1D tensor used for the overall variance. It is used only for inference. In case of training,\n
938 *   this parameter must be left empty.
939 *
940 * Output:
941 *
942 * * <b>output</b>: computed tensor.
943 *
944 * @since 3.2
945 * @version 2.1
946 */
947struct FusedBatchNorm
948{
949    /** A small value close to zero. It is used to ensure that the divisor is not 0. */
950    float epsilon;
951};
952
953/**
954 * @brief Returns the slice of the input tensor based on the specified index and axis.
955 *
956 * The {@link NodeType} of this operator is <b>NODE_TYPE_GATHER</b>.
957 *
958 * Input:
959 *
960 * * <b>x</b>, an n-dimensional tensor.
961 * * <b>inputIndices</b>, indices of elements of the original tensor. The value is an array of the int type\n
962 *   and must be in the range [0, x.shape[axis]).
963 * * <b>axis</b>, the axis on which <b>x</b> is sliced. The value is an array with one element of the int32_t type.
964 *
965 * Output:
966 *
967 * * Sliced tensor.
968 *
969 * @since 3.2
970 * @version 2.1
971 */
972struct Gather
973{
974};
975
976/**
977 * @brief Returns the tensor resulted from performing the greater logical operation elementwise\n
978 * on the input tensors <b>x1</b> and <b>x2</b>.
979 *
980 * The {@link NodeType} of this operator is <b>NODE_TYPE_GREATER</b>.
981 *
982 * Input:
983 *
984 * * <b>x1</b>, the first input operand.
985 * * <b>x2</b>, the second input operand.
986 *
987 * Output:
988 *
989 * * Result tensor.
990 *
991 * @since 5.0
992 * @version 2.1
993 */
994struct Greater
995{
996};
997
998/**
999 * @brief Returns the tensor resulted from performing the greater-equal logical operation elementwise\n
1000 * on the input tensors <b>x1</b> and <b>x2</b>.
1001 *
1002 * The {@link NodeType} of this operator is <b>NODE_TYPE_GREATER_EQUAL</b>.
1003 *
1004 * Input:
1005 *
1006 * * <b>x1</b>, the first input operand.
1007 * * <b>x2</b>, the second input operand.
1008 *
1009 * Output:
1010 *
1011 * * Result tensor.
1012 *
1013 * @since 5.0
1014 * @version 2.1
1015 */
1016struct GreaterEqual
1017{
1018};
1019
1020/**
1021 * @brief Carries out instance normalization as formula <b>y = scale * (x - mean) / sqrt(variance + epsilon) + B</b>,
1022 * where mean and variance are computed per instance per channel.
1023 *
1024 * The {@link NodeType} of this operator is <b>NODE_TYPE_INSTANCE_NORM</b>.
1025 *
1026 * Input:
1027 *
1028 * * <b>x</b>, input data tensor from the previous operator;
1029 *   Dimensions for image case are (N x C x H x W), where N is the batch size,
1030 *   C is the number of channels, and H and W are the height and the width of the data.
1031 *   For non image case, the dimensions are in the form of (N x C x D1 x D2 … Dn), where N is the batch size.
1032 * * <scale> The input 1-dimensional scale tensor of size C.
1033 * * <bias> The input 1-dimensional bias tensor of size C.
1034 *
1035 * Output:
1036 *
1037 * * The output tensor of the same shape as input.
1038 *
1039 * @since 5.0
1040 * @version 2.1
1041 */
1042struct InstanceNorm
1043{
1044    /** The epsilon value to use to avoid division by zero. */
1045    float epsilon;
1046};
1047
1048/**
1049 * @brief Applies layer normalization for a tensor from the specified axis.
1050 *
1051 * The {@link NodeType} of this operator is <b>NODE_TYPE_LAYER_NORM_FUSION</b>.
1052 *
1053 * Input:
1054 *
1055 * * <b>x</b>, an n-dimensional tensor.
1056 * * <b>gamma</b>, an m-dimensional tensor. The dimensions of <b>gamma</b> must be the same as the shape of the part\n
1057 *   of the input tensor to normalize.
1058 * * <b>beta</b>, an m-dimensional tensor with the same shape as <b>gamma</b>.
1059 *
1060 * Output:
1061 *
1062 * * An n-dimensional tensor, with the same data type and shape as the input tensor.
1063 *
1064 * @since 3.2
1065 * @version 2.1
1066 */
1067struct LayerNormFusion
1068{
1069    /** Start axis of <b>x</b> to apply layer normalization. */
1070    long beginNormAxis;
1071    /** A value added to the denominator for numerical stability. */
1072    float epsilon;
1073    /** Whether to perform an element-wise operation. */
1074    boolean elementwiseAffine;
1075    /** Start axis of the parameter input (gamma, beta) to apply layer normalization.\n
1076      * The value must be in the range [-n, n).
1077      */
1078    long beginParamsAxis;
1079};
1080
1081/**
1082 * @brief LeakyRelu takes input data (Tensor) and an argument alpha, and produces one output data (Tensor)\n
1083 * where the function f(x) = alpha * x for x < 0, f(x) = x for x >= 0, is applied to the data tensor elementwise.
1084 *
1085 * The {@link NodeType} of this operator is <b>NODE_TYPE_LEAKY_RELU</b>.
1086 *
1087 * Input:
1088 *
1089 * * <b>x</b>, input tensor.
1090 *
1091 * Output:
1092 *
1093 * * Result tensor.
1094 *
1095 * @since 5.0
1096 * @version 2.1
1097 */
1098struct LeakyRelu
1099{
1100    /** Coefficient of leakage. */
1101    float negativeSlope;
1102};
1103
1104/**
1105 * @brief Returns the tensor resulted from performing the less logical operation elementwise\n
1106 * on the input tensors <b>x1</b> and <b>x2</b>.
1107 *
1108 * The {@link NodeType} of this operator is <b>NODE_TYPE_LESS</b>.
1109 *
1110 * Input:
1111 *
1112 * * <b>x1</b>, the first input operand.
1113 * * <b>x2</b>, the second input operand.
1114 *
1115 * Output:
1116 *
1117 * * Result tensor.
1118 *
1119 * @since 5.0
1120 * @version 2.1
1121 */
1122struct Less
1123{
1124};
1125
1126/**
1127 * @brief Calculates the result of <b>x1</b> <= <b>x2</b> element-wise.
1128 *
1129 * The {@link NodeType} of this operator is <b>NODE_TYPE_LESS_EQUAL</b>.
1130 *
1131 * Input:
1132 *
1133 * * <b>x1</b>, which can be a number, a Boolean value, or a tensor whose data type is number or Boolean.
1134 * * <b>x2</b>, which can be a number or a Boolean value if <b>x1</b> is a tensor; or a tensor with the data type\n
1135 *   of number or Boolean if <b>x1</b> is not a tensor.
1136 *
1137 * Output:
1138 *
1139 * * A tensor with the data type of Boolean. When a quantization model is used, the quantization parameters of\n
1140 *   the output cannot be omitted. However, values of the quantization parameters do not affect the result.
1141 *
1142 * @since 3.2
1143 * @version 2.1
1144 */
1145struct LessEqual
1146{
1147};
1148
1149/**
1150 * @brief Calculates the result of nature logarithm of the input <b>x</b>.
1151 *
1152 * The {@link NodeType} of this operator is <b>NODE_TYPE_LOG</b>.
1153 *
1154 * Input:
1155 *
1156 * * <b>x</b>, the input tensor.
1157 *
1158 * Output:
1159 *
1160 * * Result tensor.
1161 *
1162 * @since 5.0
1163 * @version 2.1
1164 */
1165struct Log
1166{
1167};
1168
1169/**
1170 * @brief Calculates the logical value of <b>x1</b> AND <b>x2</b> element-wise.
1171 *
1172 * The {@link NodeType} of this operator is <b>NODE_TYPE_LOGICAL_AND</b>.
1173 *
1174 * Input:
1175 *
1176 * * <b>x1</b>, tensor of type boolean or convert to boolean implicitly.
1177 * * <b>x2</b>, tensor of type boolean or convert to boolean implicitly.
1178 *
1179 * Output:
1180 *
1181 * * A tensor of type bool with the shape that <b>x1</b> and <b>x2</b> broadcast to.
1182 *
1183 * @since 5.0
1184 * @version 2.1
1185 */
1186struct LogicalAnd
1187{
1188};
1189
1190/**
1191 * @brief Calculates the logical value of NOT <b>x</b> element-wise.
1192 *
1193 * The {@link NodeType} of this operator is <b>NODE_TYPE_LOGICAL_NOT</b>.
1194 *
1195 * Input:
1196 *
1197 * * <b>x</b>, tensor of type boolean or convert to boolean implicitly.
1198 *
1199 * Output:
1200 *
1201 * * A tensor of type bool with the shape of <b>x</b>.
1202 *
1203 * @since 5.0
1204 * @version 2.1
1205 */
1206struct LogicalNot
1207{
1208};
1209
1210/**
1211 * @brief Calculates the logical value of <b>x1</b> OR <b>x2</b> element-wise.
1212 *
1213 * The {@link NodeType} of this operator is <b>NODE_TYPE_LOGICAL_OR</b>.
1214 *
1215 * Input:
1216 *
1217 * * <b>x1</b>, tensor of type boolean or convert to boolean implicitly.
1218 * * <b>x2</b>, tensor of type boolean or convert to boolean implicitly.
1219 *
1220 * Output:
1221 *
1222 * * A tensor of type bool with the shape that <b>x1</b> and <b>x2</b> broadcast to.
1223 *
1224 * @since 5.0
1225 * @version 2.1
1226 */
1227struct LogicalOr
1228{
1229};
1230
1231/**
1232 * @brief Normalize over local input regions.
1233 *
1234 * The {@link NodeType} of this operator is <b>NODE_TYPE_LRN</b>.
1235 *
1236 * Input:
1237 *
1238 * * <b>x</b>, input tensor of n-dimensional.
1239 *
1240 * Output:
1241 *
1242 * * Result output tensor.
1243 *
1244 * @since 5.0
1245 * @version 2.1
1246 */
1247struct LRN
1248{
1249    /** Half-width of the 1-dimension normalization window. */
1250    long depthRadius;
1251    /** Offset. */
1252    float bias;
1253    /** Scale factor. */
1254    float alpha;
1255    /** Exponent. */
1256    float beta;
1257    /** Specifies normalization region. Options: "ACROSS_CHNNEL". */
1258    String normRegion;
1259};
1260
1261/**
1262 * @brief Calculate the L2-normalize of the input using the given axis.
1263 *
1264 * The {@link NodeType} of this operator is <b>NODE_TYPE_L2_NORMALIZE_FUSION</b>.
1265 *
1266 * Input:
1267 *
1268 * * <b>x</b>, input to compute the L2-normalization.
1269 *
1270 * Output:
1271 *
1272 * * Result tensor with the same type and shape as input <b>x</b>.
1273 *
1274 * @since 5.0
1275 * @version 2.1
1276 */
1277struct L2NormalizeFusion
1278{
1279    /** The axis on which to apply normalization, -1 means last axis, default: 0.*/
1280    long[] axis;
1281    /** Value added for numerical stability. default: 1e-4; */
1282    float epsilon;
1283    /** Activation function type. For details, see {@link ActivationType}. */
1284    enum ActivationType activationType;
1285};
1286
1287/**
1288 * @brief Computes an one-layer LSTM. This operator is usually supported via some custom implementation.
1289 *
1290 * The {@link NodeType} of this operator is <b>NODE_TYPE_LSTM</b>.
1291 *
1292 * Input:
1293 *
1294 * * <b>input</b>, input tensor, shape is [seqLen, batchSize, inputSize].
1295 * * <b>weightIh</b>, weight tensor of input-layer to hidden-layer,
1296 *   shape is [numDirections * numLayers, 4 * hiddenSize, inputSize].
1297 * * <b>weightHh</b>, weight tensor of hidden-layer to hidden-layer,
1298 *   shape is [numDirections * numLayers, 4 * hiddenSize, hiddenSize].
1299 * * <b>bias</b>, bias tensor of input-layer and hidden-layer to hidden-layer,
1300 *   shape is [numDirections * numLayers, 8 * hiddenSize].
1301 * * <b>hiddenState</b>, init state of hidden-layer, shape is [numDirections * numLayers, batchSize, hiddenSize].
1302 * * <b>cellState</b>, init state of cell, shape is [numDirections * numLayers, batchSize, hiddenSize].
1303 *
1304 * Output:
1305 *
1306 * * <b>output</b>, a tensor that concats all the intermediate output tensor of the hidden,
1307 *   shape is [seqLen, batchSize, numDirections * realHiddenSize].
1308 * * <b>hiddenState</b>, the last output tensor of the hidden-layer,
1309 *   shape is [numDirections * numLayers, batchSize, realHiddenSize].
1310 * * <b>cellState</b>, the last output tensor of the cell,
1311 *   shape is [numDirections * numLayers, batchSize, hiddenSize].
1312 *
1313 * @since 5.0
1314 * @version 2.1
1315 */
1316struct LSTM
1317{
1318    /** Whether the LSTM operation is bi-directional. */
1319    boolean bidirectional;
1320    /** Whether the operation contains bias. */
1321    boolean hasBias;
1322    /** Size of input tensor. */
1323    long inputSize;
1324    /** Size of hidden state tensor. */
1325    long hiddenSize;
1326    /** Layers of LSTM network. */
1327    long numLayers;
1328    /** Number of directions, value is 2 if direction == bidirectional else 1. */
1329    long numDirections;
1330    /** Dropout probalility of each layer except first-layer. */
1331    float dropout;
1332    /** Probalility that the cell state retains the previous state. Default: 0. */
1333    float zoneoutCell;
1334    /** Probalility that the hidden state retains the previous state. Default: 0. */
1335    float zoneoutHidden;
1336    /** If projSize > 0, will use LSTM with projections of corresponding size. Default: 0. */
1337    long projSize;
1338};
1339
1340/**
1341 * @brief Calculates the matrix product of <b>x1</b> and <b>x2</b>.
1342 *
1343 * The {@link NodeType} of this operator is <b>NODE_TYPE_MATMUL_FUSION</b>.
1344 *
1345 * Input:
1346 *
1347 * * <b>x1</b>, an n-dimensional tensor, whose data type can be number or Boolean.
1348 * * <b>x2</b>, an n-dimensional tensor, whose data type can be number or Boolean.
1349 *
1350 * Output:
1351 *
1352 * * Matrix product of the inputs. When type! When = DATA_TYPE_UNKNOWN, the data type of the output is determined by\n
1353 *   <b>type</b>. When type==DATA_TYPE_UNKNOWN,
1354 *   the data type of the output depends on the data type converted during the calculation of <b>x1</b> and <b>x2</b>.
1355 *
1356 * @since 3.2
1357 * @version 2.1
1358 */
1359struct MatMulFusion
1360{
1361    /** Whether to transpose the <b>x1</b> matrix. */
1362    boolean transposeA;
1363    /** Whether to transpose the <b>x2</b> matrix. */
1364    boolean transposeB;
1365    /** Activation function type. For details, see {@link ActivationType}. */
1366    enum ActivationType activationType;
1367};
1368
1369/**
1370 * @brief Calculates the maximum of <b>x1</b> and <b>x2</b> element-wise. The inputs of <b>x1</b> and <b>x2</b>\n
1371 * comply with the implicit type conversion rules to make the data types are consistent.
1372 *
1373 * The input must be two tensors or one tensor and one scalar. When the input is two tensors, the data types\n
1374 * cannot be Boolean at the same time, and their shapes can be broadcast to the same size. When the inputs are\n
1375 * one tensor and one scalar, the scalar must be a constant.
1376 *
1377 * The {@link NodeType} of this operator is <b>NODE_TYPE_MAXIMUM</b>.
1378 *
1379 * Input:
1380 *
1381 * * <b>x1</b>, an n-dimensional tensor, whose data type can be number or Boolean.
1382 * * <b>x2</b>, an n-dimensional tensor, whose data type can be number or Boolean.
1383 *
1384 * Output:
1385 *
1386 * * Maximum value of the elements of the two tensors.
1387 *
1388 * @since 3.2
1389 * @version 2.1
1390 */
1391struct Maximum
1392{
1393};
1394
1395/**
1396 * @brief Applies a 2D maximum pooling over an input tensor.
1397 *
1398 * The {@link NodeType} of this operator is <b>NODE_TYPE_MAX_POOL_FUSION</b>.
1399 *
1400 * When padMode==PAD_MODE_PAD, <b>padList</b> must be greater than or equal to <b>0</b>.\n
1401 * In other cases, <b>padding</b> must be <b>0</b>.
1402 *
1403 * Input:
1404 *
1405 * * <b>x</b>, an n-dimensional tensor.
1406 *
1407 * Output:
1408 *
1409 * * Result tensor after max-pooling.
1410 *
1411 * @since 3.2
1412 * @version 2.1
1413 */
1414struct MaxPoolFusion
1415{
1416    /** Size of the kernel used to take the maximum value. It is an int array [kernel_height, kernel_weight]\n
1417      * with length of 2.
1418      */
1419    long[] kernelSize;
1420    /** Distance of kernel moving. It is an int array with two elements. */
1421    long[] strides;
1422    /** Array to pad. */
1423    long[] pad;
1424    /** Padding mode. For details, see {@link PadMode}. */
1425    enum PadMode padMode;
1426    /** Format of the tensor data. For details, see {@link Format}. */
1427    enum Format format;
1428    /** RoundMode mode. For details, see {@link RoundMode}. */
1429    enum RoundMode roundMode;
1430    /** Whether to do global pooling. */
1431    boolean global;
1432    /** Activation function type. For details, see {@link ActivationType}. */
1433    enum ActivationType activationType;
1434};
1435
1436/**
1437 * @brief Calculates the minimum of <b>x1</b> and <b>x2</b> element-wise. The inputs of <b>x1</b> and <b>x2</b>\n
1438 * comply with the implicit type conversion rules to make the data types are consistent.
1439 *
1440 * The input must be two tensors or one tensor and one scalar. When the input is two tensors, the data types\n
1441 * cannot be Boolean at the same time, and their shapes can be broadcast to the same size. When the inputs are\n
1442 * one tensor and one scalar, the scalar must be a constant.
1443 *
1444 * The {@link NodeType} of this operator is <b>NODE_TYPE_MINIMUM</b>.
1445 *
1446 * Input:
1447 *
1448 * * <b>x1</b>, an n-dimensional tensor, whose data type can be number or Boolean.
1449 * * <b>x2</b>, an n-dimensional tensor, whose data type can be number or Boolean.
1450 *
1451 * Output:
1452 *
1453 * * Minimum value of the elements of the two tensors.
1454 *
1455 * @since 5.0
1456 * @version 2.1
1457 */
1458struct Minimum
1459{
1460};
1461
1462/**
1463 * @brief Computes the remainder of dividing the first input tensor by the second input tensor element-wise.
1464 *
1465 * Inputs of x and y comply with the implicit type conversion rules to make the data types consistent.
1466 * The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors,
1467 * both dtypes cannot be bool, and the shapes of them could be broadcast.
1468 * When the inputs are one tensor and one scalar, the scalar could only be a constant.
1469 *
1470 * The {@link NodeType} of this operator is <b>NODE_TYPE_MOD</b>.
1471 *
1472 * Input:
1473 *
1474 * * <b>x</b>, a number, a bool or a tensor whose data type is number.
1475 * * <b>y</b>, if input <b>x</b> is a tensor, <b>y</b> could be a number, a bool or a tensor whose data type is number.
1476 *   If input <b>x</b> is a number or a bool, <b>y</b> must be a tensor whose data type is number.
1477 *
1478 * Output:
1479 *
1480 * * Result tensor, the shape is the same as the one after broadcasting, and the data type is the one with\n
1481  higher precision or higher digits among the two inputs.
1482 *
1483 * @since 5.0
1484 * @version 2.1
1485 */
1486struct Mod
1487{
1488};
1489
1490/**
1491 * @brief Multiplies the elements in the same position of <b>x1</b> and <b>x2</b> to obtain output.
1492 *
1493 * If the shapes of <b>x1</b> and <b>x2</b> are different, expand <b>x1</b> and <b>x2</b> to the same shape through\n
1494 * broadcast and then perform the multiplication.
1495 * The {@link NodeType} of this operator is <b>NODE_TYPE_MUL_FUSION</b>.
1496 *
1497 * Input:
1498 *
1499 * * <b>x1</b>, a tensor of the int or float type.
1500 * * <b>x2</b>, a tensor of the int or float type.
1501 *
1502 * Output:
1503 *
1504 * * Product of each element of <b>x1</b> and <b>x2</b>.
1505 *
1506 * @since 3.2
1507 * @version 2.1
1508 */
1509struct MulFusion
1510{
1511    /** Activation function type. For details, see {@link ActivationType}. */
1512    enum ActivationType activationType;
1513};
1514
1515/**
1516 * @brief Calculate the opposite value of the input tensor element-wise.
1517 *
1518 * The {@link NodeType} of this operator is <b>NODE_TYPE_NEG</b>.
1519 *
1520 * Input:
1521 *
1522 * * <b>x</b>, a tensor of the int or float type.
1523 *
1524 * Output:
1525 *
1526 * * Result tensor, has the same shape and dtype as input <b>x</b>.
1527 *
1528 * @since 5.0
1529 * @version 2.1
1530 */
1531struct Neg
1532{
1533};
1534
1535/**
1536 * @brief Returns the tensor resulted from performing the not-equal logical operation elementwise\n
1537 * on the input tensors <b>x1</b> and <b>x2</b>.
1538 *
1539 * The {@link NodeType} of this operator is <b>NODE_TYPE_NOT_EQUAL</b>.
1540 *
1541 * Input:
1542 *
1543 * * <b>x1</b>, the first input operand.
1544 * * <b>x2</b>, the second input operand.
1545 *
1546 * Output:
1547 *
1548 * * Result tensor.
1549 *
1550 * @since 5.0
1551 * @version 2.1
1552 */
1553struct NotEqual
1554{
1555};
1556
1557/**
1558 * @brief Generates a one-hot tensor based on the specified locations.
1559 *
1560 * The locations specified by <b>indices</b> are determined by <b>onValue</b>, and other locations are determined\n
1561 * by <b>offValue</b>.
1562 *
1563 * The {@link NodeType} of this operator is <b>NODE_TYPE_ONE_HOT</b>.
1564 *
1565 * Input:
1566 *
1567 * * <b>indices</b>, an n-dimensional tensor. Each element in <b>indices</b> determines the location of <b>onValue</b>\n
1568 *   in each one-hot vector.
1569 * * <b>depth</b>, an integer scalar that determines the depth of the one-hot vector. The value of <b>depth</b> must be\n
1570 *   greater than <b>0</b>.
1571 * * <b>onValue</b>, a scalar that specifies a valid value in the one-hot vector.
1572 * * <b>offValue</b>, a scalar that specifies the values of other locations in the one-hot vector except the valid value.
1573 *
1574 * Output:
1575 *
1576 * * An (n+1)-dimensional tensor if <b>indices</b> is an n-dimensional tensor. The output shape is determined by\n
1577 *   <b>indices</b> and <b>axis</b>.
1578 *
1579 * @since 3.2
1580 * @version 2.1
1581 */
1582struct OneHot
1583{
1584    /**
1585     * An integer scalar that specifies the dimension for inserting the one-hot.
1586     * Assume that the shape of <b>indices</b> is [N, C],\n
1587     * and the value of <b>depth</b> is D.
1588     * When <b>axis</b> is <b>0</b>, the shape of the output is [D, N, C].
1589     * When <b>axis</b> is <b>-1</b>, the shape of the output is [N, C, D].
1590     * When <b>axis</b> is <b>-1</b>, the shape of the output is [N, D, C].
1591     */
1592    long axis;
1593};
1594
1595/**
1596 * @brief Pads the input tensor.
1597 *
1598 * The {@link NodeType} of this operator is <b>NODE_TYPE_PAD_FUSION</b>.
1599 *
1600 * When paddingMode==PADDING_MODE_CONSTANT, <b>constantValue</b> must be set.
1601 * The default value of <b>constantValue</b> is <b>0</b>.
1602 *
1603 * Input:
1604 *
1605 * * <b>x</b>, an n-dimensional tensor.
1606 * * <b>paddings</b>, a 2D tensor that specifies the length to add in each dimension.
1607 *   The shape is [n, 2]. <b>paddings[i][0]</b> indicates the number of paddings to add before the input\n
1608 *   tensor in ith dimension.
1609 * * <b>paddings[i][1]</b> indicates the number of paddings to add after the input tensor in ith dimension.
1610 *
1611 * Output:
1612 *
1613 * * An n-dimensional tensor after padding, with the same dimensions and data type as <b>x</b>.
1614 *   The shape is determined by <b>x</b> and <b>paddings</b>.
1615 *   output.shape[i] = input.shape[i] + paddings[i][0]+paddings[i][1]
1616 *
1617 * @since 3.2
1618 * @version 2.1
1619 */
1620struct PadFusion
1621{
1622    /**
1623     * A 2D tensor, specifying the length to add in each dimension. The shape is [n, 2]. <b>paddings[i][0]</b>
1624     * indicates the number of paddings to add before the input <b>x</b> in the ith dimension.
1625     * <b>paddings[i][1]</b> indicates the number of paddings to add after the input <b>x</b> in the ith dimension.
1626     * The meaning of this parameter is the same as that of <b>paddings</b> input.
1627     */
1628    long[][] paddings;
1629    /**
1630     * Padding mode.
1631     * For details, see {@link PaddingMode}.
1632     */
1633    enum PaddingMode paddingMode;
1634    /**
1635     * A constant with the same data type as <b>x</b>. It specifies the value to add in the pad operation.
1636     * This parameter is valid only when paddingMode==PADDING_MODE_CONSTANT. The default value is <b>0</b>.
1637     */
1638    float constantValue;
1639};
1640
1641/**
1642 * @brief Calculates the <b>y</b> power of each element in <b>x</b>. The inputs must be two tensors or one tensor
1643 * and one scalar.
1644 *
1645 * When the inputs are two tensors, their data types cannot be Boolean at the same time, and their shapes
1646 * must be the same.
1647 * When the inputs are one tensor and one scalar, the scalar must be a constant.
1648 *
1649 * The {@link NodeType} of this operator is <b>NODE_TYPE_POW_FUSION</b>.
1650 *
1651 * The <b>x' = scale*x+shift</b> operation is performed for each element of <b>x</b>, and then the <b>y</b> power of
1652 * <b>x'</b> is calculated.
1653 *
1654 * Input:
1655 *
1656 * * <b>x</b>, a number, a Boolean value, or a tensor whose data type is number or Boolean.
1657 * * <b>y</b>, a number, a Boolean value, or a tensor whose data type is number or Boolean.
1658 *
1659 * Output:
1660 *
1661 * * A tensor, whose shape is determined by the shape of <b>x</b> and <b>y</b> after broadcasting.
1662 *
1663 * @since 3.2
1664 * @version 2.1
1665 */
1666struct PowFusion
1667{
1668    /** Scale the value of <b>x</b>. */
1669    float scale;
1670    /** Increase or decrease the value of <b>x</b> after scaling. */
1671    float shift;
1672};
1673
1674/**
1675 * @brief Applies the PReLU activation of <b>x</b> and <b>weight</b>.
1676 *
1677 * The {@link NodeType} of this operator is <b>NODE_TYPE_PRELU_FUSION</b>.
1678 *
1679 * Input:
1680 *
1681 * * <b>x</b>, an n-dimensional tensor. If <b>n</b> is greater than or equal to 2, <b>x</b> must be
1682 *   [BatchSize, ..., Channels]. The second dimension is the number of channels.
1683 * * <b>weight</b>, a 1D tensor. The length of <b>weight</b> must be 1 or equal to the number of channels.
1684 *   If the length of <b>weight</b> is 1, all channels share the same weight.
1685 *   If the length of <b>weight</b> is equal to the number of channels, each channel exclusively has a weight.
1686 *   If <b>n</b> of <b>x</b> is less than 2, the <b>weight</b> length must be 1.
1687 *
1688 * Output:
1689 *
1690 * * PReLU activation value of <b>x</b>, with the same shape and data type as <b>x</b>.
1691 *
1692 * @since 3.2
1693 * @version 2.1
1694 */
1695struct PReLUFusion
1696{
1697    /**
1698     * Whether to enable weight sharing for the parameter validity check.
1699     * If the length of <b>weight</b> is <b>1</b>, <b>channelShared</b> must be <b>true</b>.
1700     * Otherwise, <b>channelShared</b> is <b>false</b>.
1701     */
1702    boolean channelShared;
1703};
1704
1705/**
1706 * @brief Converts the data type.
1707 *
1708 * The {@link NodeType} of this operator is <b>NODE_TYPE_QUANT_DTYPE_CAST</b>.
1709 *
1710 * Input:
1711 *
1712 * * <b>x</b>, an n-dimensional tensor.
1713 *
1714 * Output:
1715 *
1716 * * Tensor after the data type conversion.
1717 *
1718 * @since 3.2
1719 * @version 2.1
1720 */
1721struct QuantDTypeCast
1722{
1723    /** Data type of the input tensor. */
1724    long srcT;
1725    /** Data type of the output tensor. */
1726    long dstT;
1727};
1728
1729/**
1730 * @brief Calculate the rank of a tensor.
1731 *
1732 * The rank of a tensor is the number of indices required to uniquely select each element of the tensor.
1733 *
1734 * The {@link NodeType} of this operator is <b>NODE_TYPE_RANK</b>.
1735 *
1736 * Input:
1737 *
1738 * * <b>x</b>, an n-dimensional tensor.
1739 *
1740 * Output:
1741 *
1742 * * Result tensor. 0-D int32 Tensor representing the rank of input.
1743 *
1744 * @since 5.0
1745 * @version 2.1
1746 */
1747struct Rank
1748{
1749};
1750
1751/**
1752 * @brief Generate a tensor containing a sequence of numbers that begin at <b>start</b>\n
1753 * and extends by increments of <b>delta</b> up to <b>limit<b>.
1754 *
1755 * The {@link NodeType} of this operator is <b>NODE_TYPE_RANGE</b>.
1756 *
1757 * Input:
1758 *
1759 * * <b>x</b> an n-dimensional tensor.
1760 *
1761 * Output:
1762 *
1763 * * A 1-D tensor with specific data type containing generated range of values.
1764 *
1765 * @since 5.0
1766 * @version 2.1
1767 */
1768struct Range
1769{
1770    /** Reserved dataType parameter. */
1771    long dataType;
1772    /** Scalar. First entry for the range of output values. */
1773    long start;
1774    /** Scalar. Exclusive upper limit for the range of output values. */
1775    long limit;
1776    /** Scalar. Value to step by. */
1777    long delta;
1778};
1779
1780/**
1781 * @brief Calculate reciprocal of a tensor element-wise.
1782 *
1783 * The {@link NodeType} of this operator is <b>NODE_TYPE_RECIPROCAL</b>.
1784 *
1785 * Input:
1786 *
1787 * * <b>x</b>, an n-dimensional tensor.
1788 *
1789 * Output:
1790 *
1791 * * Result tensor with the same shape as the input <b>x</b>.
1792 *
1793 * @since 5.0
1794 * @version 2.1
1795 */
1796struct Reciprocal
1797{
1798};
1799
1800/**
1801 * @brief Return x / y element-wise for real types. If x and y are reals, this will return floating-point division.
1802 *
1803 * The {@link NodeType} of this operator is <b>NODE_TYPE_REAL_DIV</b>.
1804 *
1805 * Input:
1806 *
1807 * * <b>x</b>, an n-dimensional tensor.
1808 * * <b>y</b>, an n-dimensional tensor. Has the same data type as x.
1809 *
1810 * Output:
1811 *
1812 * * A tensor. Has the same data type as x.
1813 *
1814 * @since 5.0
1815 * @version 2.1
1816 */
1817struct RealDiv
1818{
1819};
1820
1821/**
1822 * @brief Reduces the dimensions of a tensor.
1823 *
1824 * The {@link NodeType} of this operator is <b>NODE_TYPE_REDUCE_FUSION</b>.
1825 * If <b>mode</b> is <b>REDUCE_ALL</b>, <b>REDUCE_PROD</b>, or <b>REDUCE_MEAN</b> and <b>reduceToEnd</b>
1826 * is <b>true</b>, the output is the reduced value multiplied by <b>coeff</b>.
1827 *
1828 * Input:
1829 *
1830 * * <b>x</b>, an n-dimensional tensor, where <b>n</b> is less than 8.
1831 * * <b>axis</b>, a 1D tensor that specifies the dimension to reduce. The value range of each element in axis
1832 *   is [–n, n).
1833 *
1834 * Output:
1835 *
1836 * * An m-dimensional tensor, with the same data type as <b>x</b>. If <b>keepDims</b> is <b>false</b>, m < n.
1837 *   If <b>keepDims</b> is <b>true</b>, m==n.
1838 *
1839 * @since 3.2
1840 * @version 2.1
1841 */
1842struct ReduceFusion
1843{
1844    /** Whether to keep the dimensions remain unchanged. */
1845    boolean keepDims;
1846    /** Algorithm used to reduce the tensor dimensions. For details, see {@link ReduceMode}. */
1847    enum ReduceMode mode;
1848    /**
1849     * If this parameter is set to <b>true</b>, the first element is obtained from axis and set to <b>i</b>,
1850     * and then <b>axis</b> will be changed to [i,i+1, ...,n-1,n].
1851     * For example, if <b>reduceToEnd</b> is <b>true</b>, <b>axis</b> is [2,4], and the number of dimensions of
1852     * <b>x</b> is 7, then <b>axis</b> will be [2,3,4,5,6].
1853     */
1854    boolean reduceToEnd;
1855    /** Coefficient. */
1856    float coeff;
1857};
1858
1859/**
1860 * @brief Reshapes a tensor.
1861 *
1862 * The {@link NodeType} of this operator is <b>NODE_TYPE_RESHAPE</b>.
1863 *
1864 * Input:
1865 *
1866 * * <b>x</b>, an n-dimensional tensor.
1867 * * <b>InputShape</b>, a 1D tensor that specifies the shape of the output tensor. It must be a constant.
1868 *
1869 * Output:
1870 *
1871 * * A tensor of the specified shape. The data type is the same as that of <b>x</b>.
1872 *
1873 * @since 3.2
1874 * @version 2.1
1875 */
1876struct Reshape
1877{
1878};
1879
1880/**
1881 * @brief Resizes a tensor.
1882 *
1883 * The {@link NodeType} of this operator is <b>NODE_TYPE_RESIZE</b>.
1884 *
1885 * The parameter combination of this operator can implement the <b>Resize</b> function.
1886 * For example, to implement bilinear interpolation on the four corners of an image that is precisely aligned, set:
1887 * method = RESIZE_METHOD_LINEAR
1888 * coordinateTransformMode = COORDINATE_TRANSFORM_MODE_ALIGN_CORNERS
1889 *
1890 * Input:
1891 *
1892 * * <b>x</b>, a 4D tensor in the [batchSize, height, width, channels] (NHWC) format.
1893 *
1894 * Output:
1895 *
1896 * * An n-dimensional tensor, with the same shape and data type as <b>x</b>.
1897 *
1898 * @since 3.2
1899 * @version 2.1
1900 */
1901struct Resize
1902{
1903    /** Method used for resizing. For details, see {@link ResizeMethod}. */
1904    enum ResizeMethod method;
1905    /** Height of the 4D tensor after resizing. */
1906    long newHeight;
1907    /** Width of the 4D tensor after resizing. */
1908    long newWidth;
1909    /** Whether to maintain the height/width ratio of <b>x</b> after resizing. */
1910    boolean preserveAspectRatio;
1911    /**
1912     * Coordinate transformation method. For details, see {@link CoordinateTransformMode}.
1913     */
1914    enum CoordinateTransformMode coordinateTransformMode;
1915    /** Cubic coefficient, used when <b>method</b> is <b>RESIZE_METHOD_CUBIC</b>. */
1916    float cubicCoeff;
1917    /** When excludeOutside==1, the sampling weight that exceeds the boundary of <b>x</b> is set to <b>0</b>,
1918      * and other weights are normalized.
1919      */
1920    long excludeOutside;
1921    /** Value to interpolate, which is used only when <b>x</b> is cropped. The sampling weight that exceeds the
1922      * boundary is set to <b>extrapolationValue</b>.
1923      */
1924    float extrapolationValue;
1925    /** Nearest neighbor interpolation algorithm, used when <b>method</b> is <b>RESIZE_METHOD_NEAREST</b>.
1926      * For details, see {@link NearestMode}.
1927      */
1928    enum NearestMode nearestMode;
1929};
1930
1931/**
1932 * @brief Calculate half to even of a tensor element-wise.
1933 *
1934 * The {@link NodeType} of this operator is <b>NODE_TYPE_ROUND</b>.
1935 *
1936 * Input:
1937 *
1938 * * <b>x</b>, an n-dimensional tensor.
1939 *
1940 * Output:
1941 *
1942 * * Result tensor with the same shape as the input <b>x</b>.
1943 *
1944 * @since 5.0
1945 * @version 2.1
1946 */
1947struct Round
1948{
1949};
1950
1951/**
1952 * @brief Calculates the reciprocal of the square root of a tensor.
1953 *
1954 * The {@link NodeType} of this operator is <b>NODE_TYPE_RSQRT</b>.
1955 *
1956 * Input:
1957 *
1958 * *<b>x</b>, an n-dimensional tensor, where <b>n</b> is less than 8. Each element of the tensor cannot be less than 0.
1959 *
1960 * Output:
1961 *
1962 * * An n-dimensional tensor, with the same shape and data type as <b>x</b>.
1963 *
1964 * @since 3.2
1965 * @version 2.1
1966 */
1967struct Rsqrt
1968{
1969};
1970
1971/**
1972 * @brief Scales a tensor.
1973 *
1974 * The {@link NodeType} of this operator is <b>NODE_TYPE_SCALE_FUSION</b>.
1975 *
1976 * Input:
1977 *
1978 * * <b>x</b>, an n-dimensional tensor.
1979 * * <b>scale</b>, the scaling tensor.
1980 * * <b>bias</b>, the bias tensor.
1981 *
1982 * Output:
1983 *
1984 * * An n-dimensional tensor scaled, whose data type is the same as that of <b>x</b>x and shape is determined
1985 *   by <b>axis</b>.
1986 *
1987 * @since 3.2
1988 * @version 2.1
1989 */
1990struct ScaleFusion
1991{
1992    /** Dimensions to scale. */
1993    long axis;
1994    /** Activation function type. For details, see {@link ActivationType}. */
1995    enum ActivationType activationType;
1996};
1997
1998/**
1999 * @brief Scatters a tensor into a new tensor depending on the specified indices.
2000 *
2001 * The {@link NodeType} of this operator is <b>NODE_TYPE_SCATTER_ND</b>.
2002 *
2003 * Input:
2004 *
2005 * * <b>indices</b>, the index of scattering in the new tensor with int32 or int64 data type.
2006 *   The rank of indices must be at least 2 and indices_shape[-1] <= len(shape).
2007 * * <b>updates</b>, the source tensor to be scattered. It has shape indices_shape[:-1] + shape[indices_shape[-1]:].
2008 * * <b>shape</b>, the shape of the output tensor, has the same data type as <b>indices</b>.
2009 *
2010 * Output:
2011 *
2012 * * Result tensor with the same type as <b>update</b> and the same shape as <b>shape</b>.
2013 *
2014 * @since 5.0
2015 * @version 2.1
2016 */
2017struct ScatterNd
2018{
2019};
2020
2021/**
2022 * @brief Returns the share of the input tensor.
2023 *
2024 * The {@link NodeType} of this operator is <b>NODE_TYPE_SHAPE</b>.
2025 *
2026 * Input:
2027 *
2028 * * <b>x</b>, an n-dimensional tensor.
2029 *
2030 * Output:
2031 *
2032 * * An integer array representing the dimensions of <b>x</b>.
2033 *
2034 * @since 3.2
2035 * @version 2.1
2036 */
2037struct Shape
2038{
2039};
2040
2041/**
2042 * @brief Calculate sine of the input element-wise.
2043 *
2044 * The {@link NodeType} of this operator is <b>NODE_TYPE_SIN</b>.
2045 *
2046 * Input:
2047 *
2048 * * <b>x</b>, an n-dimensional tensor.
2049 *
2050 * Output:
2051 *
2052 * * Result tensor with the same shape as the input <b>x</b>.
2053 *
2054 * @since 5.0
2055 * @version 2.1
2056 */
2057struct Sin
2058{
2059};
2060
2061/**
2062 * @brief Slices a tensor of the specified size.
2063 *
2064 * The {@link NodeType} of this operator is <b>NODE_TYPE_SLICE_FUSION</b>.
2065 *
2066 * Input:
2067 *
2068 * * <b>x</b>, an n-dimensional tensor.
2069 * * <b>begin</b>, an array of integers greater than or equal to 0, specifying the start of the slice.
2070 * * <b>size</b>, an array of integers greater than or equal to 0, specifying the length of the slice.
2071 *   Assume that a dimension is <b>i</b> and 1<=size[i]<=input.shape[i]-begin[i].
2072 *
2073 * Output:
2074 *
2075 * * An n-dimensional tensor obtained.
2076 *
2077 * @since 3.2
2078 * @version 2.1
2079 */
2080struct SliceFusion
2081{
2082    /** Dimensions on which the tensor is sliced. */
2083    long[] axes;
2084};
2085
2086/**
2087 * @brief Applies the <b>softmax</b> operation on a tensor.
2088 *
2089 * The {@link NodeType} of this operator is <b>NODE_TYPE_SOFTMAX</b>.
2090 *
2091 * Input:
2092 *
2093 * * <b>x</b>, an n-dimensional tensor.
2094 *
2095 * Output:
2096 *
2097 * * Result of the <b>softmax</b> operation. It is an n-dimensional tensor with the same data type and shape
2098 *   as <b>x</b>.
2099 *
2100 * @since 3.2
2101 * @version 2.1
2102 */
2103struct Softmax
2104{
2105    /** Dimensions on which the <b>softmax</b> operation is performed. It is an integer in the range [-n, n). */
2106    long[] axis;
2107};
2108
2109/**
2110 * @brief Splits a 4D tensor into multiple blocks in the spatial dimension and then concatenates these blocks
2111 * in the batch dimension.
2112 *
2113 * The {@link NodeType} of this operator is <b>NODE_TYPE_SPACE_TO_BATCH_ND</b>.
2114 *
2115 * Input:
2116 *
2117 * * <b>x</b>, an n-dimensional tensor.
2118 *
2119 * Output:
2120 *
2121 * A 4D tensor with the same data type as <b>x</b>. The shape is determined by <b>input</b>, <b>blockShape</b>,
2122 * and <b>paddings</b>. Assume that the input shape is [n,c,h,w], then:
2123 * \f$ output.shape[0] = n * blockShape[0] * blockShape[1]\f$<br>
2124 * \f$ output.shape[1] = c \f$<br>
2125 * \f$ output.shape[2] = (h + paddings[0][0] + paddings[0][1]) / blockShape[0] \f$<br>
2126 * \f$ output.shape[3] = (w + paddings[1][0] + paddings[1][1]) / blockShape[1] \f$<br>
2127 * \f$ (h + paddings[0][0] + paddings[0][1]) must be an integer multiple of \f$ blockShape[0]\f$, and
2128 * (w + paddings[1][0] + paddings[1][1]) \f$ must be an integer multiple of \f$ blockShape[1] \f$.
2129 *
2130 * @since 3.2
2131 * @version 2.1
2132 */
2133struct SpaceToBatchND
2134{
2135    /** Number of blocks. The value must be greater than 1. */
2136    long[] blockShape;
2137    /** Padding size for spatial dimensions. */
2138    long[][] paddings;
2139};
2140
2141/**
2142 * @brief Rearrange blocks of spatial data into depth.
2143 *
2144 * The output tensor’s height dimension is height / blockSize;
2145 * The output tensor’s weight dimension is weight / blockSize;
2146 * The depth of output tensor is blockSize * blockSize * inputDepth;
2147 * The input tensor’s height and width must be divisible by blockSize.
2148 *
2149 * The {@link NodeType} of this operator is <b>NODE_TYPE_SPACE_TO_DEPTH</b>.
2150 *
2151 * Input:
2152 *
2153 * * <b>x</b>, an 4-dimensional tensor.
2154 *
2155 * Output:
2156 *
2157 * * Result tensor with the same dataType as the input <b>x</b>.
2158 *
2159 * @since 5.0
2160 * @version 2.1
2161 */
2162struct SpaceToDepth
2163{
2164    /** The block size used to divide spatial data. It must be >= 2. */
2165    long blockSize;
2166    /** Format of the tensor data. For details, see {@link Format}. */
2167    enum Format format;
2168};
2169
2170/**
2171 * @brief Converts a sparse representation into a dense tensor.
2172 *
2173 * The {@link NodeType} of this operator is <b>NODE_TYPE_SPARSE_TO_DENSE</b>.
2174 *
2175 * Input:
2176 *
2177 * * <b>indices</b>, 2-dimension tensor, represents the position of the element in the sparse tensor.
2178 * * <b>values</b>, 1-dimension Tensor, represents the value corresponding to the position in the indices.
2179 * * <b>sparseShape</b>, a positive int tuple which specifies the shape of sparse tensor,
2180 *   should have 2 elements, represent sparse tensor shape is (N, C).
2181 *
2182 * Output:
2183 *
2184 * * Result tensor converted from sparse tensor with dtype samed as <b>values</b>, and the shape is <b>sparseShape</b>.
2185 *
2186 * @since 5.0
2187 * @version 2.1
2188 */
2189struct SparseToDense
2190{
2191};
2192
2193/**
2194 * @brief Splits a tensor into multiple tensors along the axis dimension. The number of tensors is
2195 * specified by <b>outputNum</b>.
2196 *
2197 * The {@link NodeType} of this operator is <b>NODE_TYPE_SPLIT</b>.
2198 *
2199 * Input:
2200 *
2201 * * <b>x</b>, an n-dimensional tensor.
2202 *
2203 * Output:
2204 *
2205 * * An array of n-dimensional tensors, with the same data type and dimensions.
2206 *   The data type of each tensor is the same as that of <b>x</b>.
2207 *
2208 * @since 3.2
2209 * @version 2.1
2210 */
2211struct Split
2212{
2213    /** Number of output sensors. */
2214    long outputNum;
2215    /**
2216     * Size of each tensor to output.
2217     * If <b>sizeSplits</b> is empty, <b>x</b> will be evenly split into tensors of the same size.
2218     * In this case, x.shape[axis] must be an integer multiple of <b>outputNum</b>.
2219     * If <b>sizeSplits</b> is not empty, the sum of all elements must be equal to x.shape[axis].
2220     */
2221    long[] sizeSplits;
2222    /** Target axis on which <b>x</b> is split. The data type is int. */
2223    long axis;
2224};
2225
2226/**
2227 * @brief Calculates the square root of a tensor.
2228 *
2229 * The {@link NodeType} of this operator is <b>NODE_TYPE_SQRT</b>.
2230 *
2231 * Input:
2232 *
2233 * * <b>x</b>, an n-dimensional tensor.
2234 *
2235 * Output:
2236 *
2237 * * An n-dimensional tensor, with the same data type and shape as <b>x</b>.
2238 *
2239 * @since 3.2
2240 * @version 2.1
2241 */
2242struct Sqrt
2243{
2244};
2245
2246/**
2247 * @brief Calculates the square of a tensor.
2248 *
2249 * The {@link NodeType} of this operator is <b>NODE_TYPE_SQUARE</b>.
2250 *
2251 * Input:
2252 *
2253 * * <b>x</b>, an n-dimensional tensor.
2254 *
2255 * Output:
2256 *
2257 * * An n-dimensional tensor, with the same data type and shape as <b>x</b>.
2258 *
2259 * @since 5.0
2260 * @version 2.1
2261 */
2262struct Square
2263{
2264};
2265
2266/**
2267 * @brief Calculates the square of the difference between two tensors.
2268 * The <b>SquaredDifference</b> operator supports subtraction between tensors.
2269 *
2270 * The {@link NodeType} of this operator is <b>NODE_TYPE_SQUEEZE</b>.
2271 *
2272 * Input:
2273 *
2274 * * <b>x</b>, a tensor representing the minuend, which can be a number or a Boolean value.
2275 * * <b>x</b>, a tensor representing the subtrahend, which can be a number or a Boolean value.
2276 *
2277 * Output:
2278 *
2279 * * A tensor obtained. The shape of the output is determined by <b>x</b> and <b>y</b>.
2280 *   If <b>x</b> and <b>y</b> are of the same shape, the output shape is the same as that of <b>x</b> and <b>y</b>.
2281 *   If <b>x</b> and <b>y</b> are of different types, you need to perform the broadcast operation on
2282 *   <b>x</b> and <b>y</b> first.
2283 *   The precision of the output is the same as the input tensor with higher precision.
2284 *
2285 * @since 3.2
2286 * @version 2.1
2287 */
2288struct SquaredDifference
2289{
2290};
2291
2292/**
2293 * @brief Removes the dimension with length of 1 from the specified axis. The int8 quantization input is supported.
2294 *
2295 * Assume that the shape of <b>x</b> is [2, 1, 1, 2, 2] and <b>axis</b> is [0,1], the shape of the output tensor\n
2296 * must be [2, 1, 2, 2]. That is, the dimension with the length of 1 between the 0th and 1st dimensions is removed.
2297 *
2298 * The {@link NodeType} of this operator is <b>NODE_TYPE_SQUARED_DIFFERENCE</b>.
2299 *
2300 * Input:
2301 *
2302 * * <b>x</b>, an n-dimensional tensor.
2303 *
2304 * Output:
2305 *
2306 * *Tensor obtained.
2307 *
2308 * @since 3.2
2309 * @version 2.1
2310 */
2311struct Squeeze
2312{
2313    /** Axis on which the dimension of length 1 is to be removed. The value can be an integer or an array.
2314      * The value range of the integer is [-n, n).
2315      */
2316    long[] axis;
2317};
2318
2319/**
2320 * @brief Stacks multiple tensors along the specified axis. If the number of dimensions of each tensor is <b>n</b>
2321 * before stacking, the number of dimensions of the output tensor is <b>n</b>+1.
2322 *
2323 * The {@link NodeType} of this operator is <b>NODE_TYPE_STACK</b>.
2324 *
2325 * Input:
2326 *
2327 * * Multiple n-dimensional tensors, which are of the same data type and shape.
2328 *
2329 * Output:
2330 *
2331 * * An n+1D tensor along the specified axis, with the same data type and precision as the input tensors.
2332 *
2333 * @since 3.2
2334 * @version 2.1
2335 */
2336struct Stack
2337{
2338    /** An integer that specifies the dimension for tensor stacking. The value range is [-(n+1),(n+1)),
2339      * which means a negative number is allowed.
2340      */
2341    long axis;
2342};
2343
2344/**
2345 * @brief Slices a tensor at the specified intervals.
2346 *
2347 * The {@link NodeType} of this operator is <b>NODE_TYPE_STRIDED_SLICE</b>.
2348 *
2349 * Input:
2350 *
2351 * * <b>x</b>, an n-dimensional tensor.
2352 * * <b>begin</b>, a 1D tensor that specifies the position from which the tensor is sliced.
2353 *   The length of <b>begin</b> is <b>n</b>. begin[i] specifies the start point to slice in the ith dimension.
2354 * * <b>end</b>, a 1D tensor that specifies the end to which the tensor is sliced. The length of <b>end</b> is <b>n</b>.
2355 *   end[i] specifies the end point to slice in the ith dimension.
2356 * * <b>strides</b>, a 1D tensor that specifies the intervals for slicing. The length of <b>strides</b> is <b>n</b>.
2357 *   strides[i] specifies the intervals at which the tensor is sliced in the ith dimension. Negative values are allowed.
2358 *
2359 * For the input tensor, the shapes of <b>begin</b>, <b>end</b>, and <b>strides</b> must be the same.
2360 * The indices of <b>begin</b> and <b>end</b> start from <b>0</b>. The elements of <b>strides</b> cannot be <b>0</b>.
2361 *
2362 * Output:
2363 *
2364 * * A tensor, with the same data type as <b>x</b>. The number of dimensions of the output tensor is rank (x[0])+1.
2365 *
2366 * @since 3.2
2367 * @version 2.1
2368 */
2369struct StridedSlice
2370{
2371    /**
2372     * Mask of <b>begin</b>.
2373     * <b>beginMask</b> identifies different dimensions of <b>x</b> in binary code. For example, if bit i of <b>beginMask</b>
2374     * is set to <b>1</b>, the setting of <b>begin</b> in the ith dimension will be invalid, which means the start index of
2375     * that dimension is <b>0</b>. The default value is <b>0</b>.
2376     */
2377    long beginMask;
2378    /**
2379     * Mask of <b>end</b>. The parameter is similar to <b>beginMask</b>.
2380     * <b>endMask</b> identifies different dimensions of <b>x</b> in binary code. For example, if bit i of <b>endMask</b> is
2381     * set to <b>1</b>, the setting of <b>end</b> in the ith dimension will be invalid, which means the tensor will be
2382     * sliced till the end in that dimension. The default value is <b>0</b>.
2383     */
2384    long endMask;
2385    /**
2386     * An integer used to mask <b>begin</b> and <b>end</b>.
2387     * The tensor does not need to be sliced in the non-zero dimensions.
2388     * <b>ellipsisMask</b> is represented in binary code. If bit i of <b>ellipsisMask</b> is <b>1</b>, elements are sliced
2389     * from the first element at strides[i] in the ith dimension until the tensor boundary.
2390     */
2391    long ellipsisMask;
2392    /**
2393     * Used to add a dimension.
2394     * <b>newAxisMask</b> identifies different dimensions of <b>x</b> in binary code. If bit i is <b>1</b>, the settings of
2395     * <b>begin</b>, <b>end</b>, and <b>strides</b> are invalid for all dimensions, and a dimension with size of 1 is added
2396     * to bit i.
2397     */
2398    long newAxisMask;
2399    /**
2400     * Used to shrink the specified dimension.
2401     * <b>shrinkAxisMask</b> is represented in binary code. If the ith bit of the <b>shrinkAxisMask</b> is <b>1</b>, all
2402     * elements in the ith dimension will be discarded, and the length of the ith dimension is shrunk to <b>1</b>.
2403     */
2404    long shrinkAxisMask;
2405};
2406
2407/**
2408 * @brief Calculates the difference between two tensors.
2409 *
2410 * The {@link NodeType} of this operator is <b>NODE_TYPE_SUB_FUSION</b>.
2411 *
2412 * Input:
2413 *
2414 * * <b>x</b>, the minuend, which is a tensor of the int or float type.
2415 * * <b>y</b>, the subtrahend, which is a tensor of the int or float type.
2416 *
2417 * Output:
2418 *
2419 * * Difference between the two tensors. The output shape is determined by<b>x</b> and <b>y</b>. 
2420 * * If <b>x</b> and <b>y</b> are of the same shape, the output tensor has the same shape as <b>x</b> and <b>y</b>.
2421 *   If <b>x</b> and <b>y</b> are of different shapes, perform the broadcast operation on <b>x</b> or <b>y</b> first.
2422 *   The precision of the output is the same as the input tensor with higher precision.
2423 *
2424 * @since 3.2
2425 * @version 2.1
2426 */
2427struct SubFusion
2428{
2429    /** Activation function type. For details, see {@link ActivationType}. */
2430    enum ActivationType activationType;
2431};
2432
2433/**
2434 * @brief Copies a tensor by the specified times.
2435 *
2436 * The {@link NodeType} of this operator is <b>NODE_TYPE_TILE_FUSION</b>.
2437 *
2438 * Input:
2439 *
2440 * * <b>x</b>, an n-dimensional tensor.
2441 * * <b>multiples</b>, a 1D tensor that specifies the number of times that the input tensor is copied
2442 *   in each dimension.
2443 * * The length <b>multiples</b> is not less than the number of dimensions of <b>x</b>.
2444 *
2445 * Output:
2446 *
2447 * * An m-dimensional tensor, with the same data type as <b>x</b>. If the length of <b>x</b> is the same as that of
2448 *   <b>multiples</b>, the number of dimensions of the output tensor is the same as that of the <b>x</b>, that is,
2449 *   an n-dimensional tensor is output.
2450 *   If the length of <b>multiples</b> is greater than <b>n</b>, <b>1s</b> are used to pad the dimensions of <b>x</b>.
2451 *   Then, <b>x</b> is copies the specified number of times in each dimension to obtain an m-dimensional tensor.
2452 *
2453 * @since 3.2
2454 * @version 2.1
2455 */
2456struct TileFusion
2457{
2458    /** A 1D tensor that specifies the number of times that data is copied in each dimension. The length <b>m</b> is not
2459      * less than the number of dimensions of <b>x</b>.
2460      */
2461    long[] dims;
2462};
2463
2464/**
2465 * @brief Obtains the first K maximum values along the axis and their indices.
2466 *
2467 * The {@link NodeType} of this operator is <b>NODE_TYPE_TOPK_FUSION</b>.
2468 *
2469 * Input:
2470 *
2471 * * <b>x</b>, an n-dimensional tensor.
2472 *
2473 * Output:
2474 *
2475 * * <b>output0</b>, the first K maximum values in the axis dimension.
2476 * * <b>output1</b>, indices of the first K maximum values.
2477 *
2478 * @since 3.2
2479 * @version 2.1
2480 */
2481struct TopKFusion
2482{
2483    /** The value <b>true</b> means to sort the elements in descending order; the value <b>false</b> means
2484      * the opposite.
2485      */
2486    boolean sorted;
2487    /** Specified axis. */
2488    long axis;
2489};
2490
2491/**
2492 * @brief Transposes a tensor.
2493 *
2494 * The {@link NodeType} of this operator is <b>NODE_TYPE_TRANSPOSE</b>.
2495 *
2496 * Input:
2497 *
2498 * * <b>x</b>, an n-dimensional tensor to transpose.
2499 * * <b>perm</b>, a 1D tensor that specifies the permutation. Its length is the same as the number of dimensions of
2500 * * <b>x</b>.
2501 *
2502 * Output:
2503 *
2504 * * An n-dimensional tensor, which has the same data type and quantization parameters as <b>x</b>.
2505 *   The shape is determined by the shape of <b>x</b> and <b>perm</b>.
2506 *
2507 * @since 3.2
2508 * @version 2.1
2509 */
2510struct Transpose
2511{
2512};
2513
2514/**
2515 * @brief Adds a dimension based on the value of <b>axis</b>. *
2516 *
2517 * The {@link NodeType} of this operator is <b>NODE_TYPE_UNSQUEEZE</b>.
2518 *
2519 * Input:
2520 *
2521 * * <b>x</b>, an n-dimensional tensor.
2522 *
2523 * Output:
2524 *
2525 * * Tensor output.
2526 *
2527 * @since 3.2
2528 * @version 2.1
2529 */
2530struct Unsqueeze
2531{
2532    /** Dimension to add. The value of <b>axis</b> can be an integer or an array of integers.
2533      * The value range of the integer is [-n, n).
2534      */
2535    long[] axis;
2536};
2537
2538/**
2539 * @brief Unpacks the given dimension of a rank-R tensor into rank-(R-1) tensors.
2540 * Unpacks tensors from <b>x</b> by chipping it along the <b>axis</b> dimension.
2541 * For example, given a tensor of shape (A, B, C, D);
2542 * If axis == 0, then the i'th tensor in output is the slice value[i, :, :, :],\n
2543 * and each tensor in output will have shape (B, C, D).
2544 * If axis == 1, then the i'th tensor in output is the slice value[:, i, :, :],\n
2545 * and each tensor in output will have shape (A, C, D). Etc.
2546 * This is the opposite of stack.
2547 *
2548 * The {@link NodeType} of this operator is <b>NODE_TYPE_UNSTACK</b>.
2549 *
2550 * Input:
2551 *
2552 * * <b>x</b>, an n-dimensional tensor.
2553 *
2554 * Output:
2555 *
2556 * * Tensor output.
2557 *
2558 * @since 5.0
2559 * @version 2.1
2560 */
2561struct Unstack
2562{
2563    /** The specific axis to apply unstack operation. */
2564    long axis;
2565};
2566
2567/**
2568 * @brief Selects elements from x1 or x2 based on condition and returns a tensor.
2569 *
2570 * The {@link NodeType} of this operator is <b>NODE_TYPE_WHERE</b>.
2571 *
2572 * Input:
2573 *
2574 * * <b>condition</b>, A Tensor of type bool.
2575 * * <b>x1</b>, a tensor which may have the same shape as <b>condition</b>.
2576 *   If condition is rank 1, x1 may have higher rank, but its first dimension must match the size of condition.
2577 * * <b>x2</b>, a tensor with the same data type and shape as <b>x1</b>.
2578 * Output:
2579 *
2580 * * Tensor output. Has the same shape as <b>x1</b>.
2581 *
2582 * @since 5.0
2583 * @version 2.1
2584 */
2585struct Where
2586{
2587};
2588
2589/**
2590 * @brief Selects elements from x1 or x2, depending on condition.
2591 * The <b>x1</b> and <b>x2</b> tensors must all have the same shape, and the output will also have that shape.
2592 * The condition tensor must be a scalar if <b>x1</b> and <b>x2</b> are scalars.
2593 * If <b>x<b> and <b>x2</b> are vectors or higher rank, then condition must be either a scalar,
2594 * a vector with size matching the first dimension of x, or must have the same shape as x.
2595 * The condition tensor acts as a mask that chooses, based on the value at each element,
2596 * whether the corresponding element / row in the output should be taken from x (if true) or y (if false).
2597 *
2598 * The {@link NodeType} of this operator is <b>NODE_TYPE_SELECT</b>.
2599 *
2600 * Input:
2601 *
2602 * * <b>condition</b>, A Tensor of type bool.
2603 * * <b>x1</b>, a tensor which may have the same shape as <b>condition</b>.
2604 *   If condition is rank 1, x1 may have higher rank, but its first dimension must match the size of condition.
2605 * * <b>x2</b>, a tensor with the same data type and shape as <b>x1</b>.
2606 * Output:
2607 *
2608 * * Tensor output. Has the same shape as <b>x1</b>.
2609 *
2610 * @since 5.0
2611 * @version 2.1
2612 */
2613struct Select
2614{
2615};
2616
2617/**
2618 * @brief Computes the Gauss error function of <b>x</b> element-wise.
2619 *
2620 * The {@link NodeType} of this operator is <b>NODE_TYPE_ERF</b>.
2621 *
2622 * Input:
2623 *
2624 * * <b>x</b>, an n-dimensional tensor.
2625 *
2626 * Output:
2627 *
2628 * * Tensor output. Has the same data type and shape as x.
2629 *
2630 * @since 5.0
2631 * @version 2.1
2632 */
2633struct Erf
2634{
2635};
2636
2637/**
2638 * @brief Computes the log-softmax function to n-dimensional input tensor.
2639 *
2640 * The input is transformed by the Softmax function and then by the log function to lie in range[-inf,0).
2641 *
2642 * The {@link NodeType} of this operator is <b>NODE_TYPE_LOG_SOFTMAX</b>.
2643 *
2644 * Input:
2645 *
2646 * * <b>x</b>, an n-dimensional tensor.
2647 *
2648 * Output:
2649 *
2650 * * Tensor output. Has the same data type and shape as x.
2651 *
2652 * @since 5.0
2653 * @version 2.1
2654 */
2655struct LogSoftmax
2656{
2657    /** The axis to apply LogSoftmax operation, -1 means the last dimension. */
2658    long axis;
2659};
2660
2661/**
2662 * @brief Gathers slices from a tensor by indices.
2663 *
2664 * The {@link NodeType} of this operator is <b>NODE_TYPE_GATHER_ND</b>.
2665 *
2666 * Input:
2667 *
2668 * * <b>x</b>, the target tensor to gather values.
2669 * * <b>indices</b>, the index tensor with int32 or int64 data type.
2670 *
2671 * Output:
2672 *
2673 * * Sliced tensor. Has the same type as input_x and the shape is indices_shape[:-1] + x_shape[indices_shape[-1]:].
2674 *
2675 * @since 5.0
2676 * @version 2.1
2677 */
2678struct GatherNd
2679{
2680};
2681
2682/**
2683 * @brief Converts the data type.
2684 *
2685 * The {@link NodeType} of this operator is <b>NODE_TYPE_QUANT_DTYPE_CAST_V2</b>.
2686 * The v1 version {@link NodeType} of this operator is <b>NODE_TYPE_QUANT_DTYPE_CAST</b>,
2687 * and the corresponding NodeAttrType is {@link QuantDTypeCast}.
2688 *
2689 * Input:
2690 *
2691 * * <b>x</b>, an n-dimensional tensor.
2692 *
2693 * Output:
2694 *
2695 * * Tensor after the data type conversion.
2696 *
2697 * @since 5.0
2698 * @version 2.1
2699 */
2700struct QuantDTypeCastV2
2701{
2702    /** Data type of the input tensor. */
2703    long srcT;
2704    /** Data type of the output tensor. */
2705    long dstT;
2706    /** The axis to apply QuantDTypeCastV2 operation. */
2707    long axis;
2708};
2709
2710/** @} */
2711