/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @addtogroup NNRt
 * @{
 *
 * @brief Provides a unified interface for AI chip drivers to access OpenHarmony.
 * Neural Network Runtime (NNRt) is a cross-chip inference computing runtime environment oriented to the AI field.
 *
 * @since 3.2
 * @version 2.0
 */

/**
 * @file NodeAttrTypes.idl
 *
 * @brief Defines the parameters and functionality of AI model operators.
 *
 * All structures in this file declare only operator attributes and do not contain the interfaces for executing\n
 * operator functions.
 * - 1. The operators in the file are in one-to-one correspondence with a {@link NodeType}. In model inference,\n
 * {@link NodeType} is stored in nodeType of {@link Node}.
 * - 2. Each operator has at least one input and one output. The input is the tensor received by the operator,\n
 * and the output is the tensor produced after the operator operation. The relationship between the input, operator,\n
 * and output is determined by <b>inputIndex</b> and <b>outIndex</b> of the {@link Node} structure.
 *
 * @since 3.2
 * @version 2.0
 */

/**
 * @brief Defines the package path of the NNRt module.
 *
 * @since 3.2
 * @version 2.0
 */
package ohos.hdi.nnrt.v2_0;

import ohos.hdi.nnrt.v2_0.NnrtTypes;

/**
 * @brief Defines the operator of the activation type. All activation functions belong to this operator.\n
 * The specific activation function type is determined by the parameters.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_ACTIVATION</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Output:
 *
 * * A tensor returned after the activation function is executed.
 *
 * @since 3.2
 * @version 2.0
 */
struct Activation
{
    /** Activation function type. */
    enum ActivationType activationType;
    /** Size factor, used for the <b>LeakyReLU</b> and <b>ELU</b> activation functions. */
    float alpha;
    /** Minimum value, used for the <b>HardTanh</b> activation function. */
    float minVal;
    /** Maximum value, used for the <b>HardTanh</b> activation function. */
    float maxVal;
    /** Whether to use the approximation algorithm. It is used for the <b>GELU</b> activation function. */
    boolean approximate;
};

/**
 * @brief Adds tensors. The output shape is the same as the input one after broadcasting, and the data type\n
 * is the one with higher precision of the two inputs.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_ADD_FUSION</b>.
 *
 * Input:
 *
 * * <b>x</b>, the first input tensor.
 * * <b>y</b>, the second input tensor. The data type must be the same as that of the first tensor.
 *
 * Output:
 *
 * * Sum of the elements of <b>x</b> and <b>y</b>. The data shape is the same as the one after broadcasting,\n
 *   and the data type is the one with higher precision of the two inputs.
 *   If <b>activationType</b> is configured, the specified activation function will be called before
 *   the output is returned.
 *
 * @since 3.2
 * @version 2.0
 */
struct AddFusion
{
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Obtains the first K indices or values of a cross-axis tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_ARGMAX_FUSION</b>.
 *
 * Input:
 *
 * * <b>x</b>, a tensor of shape <b>(N,*)</b>, where * indicates any number of additional dimensions.
 *
 * Output:
 *
 * * First <b>K</b> indices or values of the maximum elements of the input tensor along the axis.
 *
 * @since 3.2
 * @version 2.0
 */
struct ArgMaxFusion
{
    /** Target axis where the maximum indices or values are obtained. */
    long axis;
    /** First <b>K</b> maximum values on the axis. */
    long topK;
    /** Whether to keep the output dimensions the same as the input dimensions. */
    boolean keepDims;
    /** Whether to return the values instead of the indices. The indices are returned if the value is <b>false</b>,\n
      * and the values are returned if the value is <b>true</b>. The default value is <b>false</b>.
      */
    boolean outMaxValue;
};

/**
 * @brief Applies a 2D average pooling on the input tensor. The int8 quantization input is supported.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_AVGPOOL_FUSION</b>.
 *
 * When padMode==PAD_MODE_PAD, <b>pad</b> must be greater than or equal to <b>0</b>.\n
 * In other cases, <b>pad</b> must be <b>0</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Output:
 *
 * * Tensor after average pooling.
 *
 * @since 3.2
 * @version 2.0
 */
struct AvgPoolFusion
{
    /**
     * Kernel size used to obtain the average value. It is an int array in the format of [kernel_height, kernel_width]\n
     * with length of 2.
     * The first number indicates the kernel height, and the second number indicates the kernel width.
     */
    long[] kernelSize;
    /**
     * Distance of kernel moving. The value is an int array [stride_height, stride_width] with length of 2.\n
     * The first number indicates the moving size in height, and the second number indicates the moving size in width.
     */
    long[] strides;
    /** <b>x</b> is padded with an int array [top, bottom, left, right] with length of 4, and the nearest neighbor values\n
      * are used for padding.
      */
    long[] pad;
    /** Padding method. */
    enum PadMode padMode;
    /** Numerical operation mode of the output tensor. */
    enum RoundMode roundMode;
    /** Format of the data during calculation. For details, see {@link Format}. */
    enum Format format;
    /** Whether to do global pooling. */
    boolean global;
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};
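
/*
 * Informative example (not part of the IDL): the pad/padMode constraint above
 * can be checked before model execution. A minimal C++ sketch, assuming a host
 * enum mirroring {@link PadMode}; all names here are illustrative only.
 *
 *     #include <cstdint>
 *     #include <vector>
 *
 *     enum class PadMode { PAD, SAME, VALID };  // assumed mirror of the IDL enum
 *
 *     // Returns true if the pad attribute [top, bottom, left, right] is
 *     // consistent with the pad mode: >= 0 in PAD mode, exactly 0 otherwise.
 *     bool IsPadValid(PadMode mode, const std::vector<int64_t>& pad)
 *     {
 *         if (pad.size() != 4) return false;
 *         for (int64_t p : pad) {
 *             if (mode == PadMode::PAD ? p < 0 : p != 0) return false;
 *         }
 *         return true;
 *     }
 */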

/**
 * @brief Divides the batch dimension of a 4D tensor into small blocks by <b>blockShape</b>, and interleaves these blocks\n
 * back into the spatial dimension.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_BATCH_TO_SPACE_ND</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Output:
 *
 * * Output tensor. Assume that the shape of <b>x</b> is (n,h,w,c) and the shape of output is (n',h',w',c'):
 * \f$ n' = n / (blockShape[0] * blockShape[1])\f$<br>
 * \f$ h' = h * blockShape[0] - crops[0][0] - crops[0][1] \f$<br>
 * \f$ w' = w * blockShape[1] - crops[1][0] - crops[1][1] \f$<br>
 * \f$ c' = c \f$
 *
 * @since 3.2
 * @version 2.0
 */
struct BatchToSpaceND
{
    /** Block size, which is an array [height_block, width_block] with length of 2. */
    long[] blockShape;
    /**
     * Crop values for the spatial dimension.
     * It is a 2D array [crop0_start, crop0_end], [crop1_start, crop1_end] with the shape of (2, 2).
     */
    long[][] crops;
};
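
/*
 * Informative example (not part of the IDL): the output-shape formulas above,
 * written out as a C++ sketch for an NHWC input; names are illustrative only.
 *
 *     #include <array>
 *     #include <cstdint>
 *
 *     // (n, h, w, c) -> (n', h', w', c') per the BatchToSpaceND formulas.
 *     std::array<int64_t, 4> BatchToSpaceShape(const std::array<int64_t, 4>& in,
 *                                              const std::array<int64_t, 2>& blockShape,
 *                                              const int64_t crops[2][2])
 *     {
 *         return {
 *             in[0] / (blockShape[0] * blockShape[1]),            // n'
 *             in[1] * blockShape[0] - crops[0][0] - crops[0][1],  // h'
 *             in[2] * blockShape[1] - crops[1][0] - crops[1][1],  // w'
 *             in[3]                                               // c'
 *         };
 *     }
 *
 * For example, in = (4, 2, 2, 1) with blockShape = [2, 2] and zero crops gives (1, 4, 4, 1).
 */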

/**
 * @brief Offsets the data in each dimension of the input tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_BIAS_ADD</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>bias</b>, the bias tensor.
 *
 * Output:
 *
 * * Output tensor, which is the sum of the input tensor and the bias in each dimension.
 *
 * @since 3.2
 * @version 2.0
 */
struct BiasAdd
{
};

/**
 * @brief Converts the tensor data type.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_CAST</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>type</b>, the target type of the data.
 *
 * Output:
 *
 * * A tensor with the specified data type.
 *
 * @since 3.2
 * @version 2.0
 */
struct Cast
{
};

/**
 * @brief Connects input tensors along the given axis.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_CONCAT</b>.
 *
 * Input:
 *
 * * Tensors with the same dimension.
 *
 * Output:
 *
 * * Result of the tensors connected.
 *
 * @since 3.2
 * @version 2.0
 */
struct Concat
{
    /** Axis specified. The value must be less than the number of dimensions of the input tensor. */
    long axis;
};

/**
 * @brief Calculates the 2D convolution on a 4D tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_CONV2D_FUSION</b>.
 *
 * When padMode==PAD_MODE_PAD, <b>padList</b> must be greater than or equal to <b>0</b>.
 * In other cases, <b>padList</b> must be <b>0</b>.
 *
 * Input:
 *
 * * <b>x</b>, a 4D tensor in NHWC format.
 * * <b>weight</b>, a convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
 *   The value of <b>inChannel</b> divided by <b>group</b> must be an integer.
 * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.\n
 *   In the quantization scenario, <b>bias</b> does not need to be quantized.
 *   If quantization is required, the input data must be of the int32 type.
 *   The actual quantization parameter is determined by <b>x</b> and <b>weight</b>.
 *
 * Output:
 *
 * * Convolution output.
 *
 * @since 3.2
 * @version 2.0
 */
struct Conv2DFusion
{
    /** Size (height and width) of the convolution kernel. */
    long[] kernelSize;
    /** Movement stride of the convolution kernel in height and width.\n
      * It is an int array [strideHeight, strideWidth] with length of 2.
      */
    long[] stride;
    /**
     * Dilation size of the convolution kernel in height and width. It is an int array in the format of\n
     * [dilationHeight, dilationWidth].
     * The value must be greater than or equal to <b>1</b> and cannot exceed the height and width of <b>x</b>.
     */
    long[] dilation;
    /** Padding mode. For details, see {@link PadMode}. */
    enum PadMode padMode;
    /** Padding around <b>x</b> in the height and width directions. It is an int array [top, bottom, left, right]\n
      * with length of 4.
      */
    long[] padList;
    /**
     * Splits <b>x</b> into groups by <b>inChannel</b>. The <b>group</b> value is of the int type.
     * If <b>group</b> is <b>1</b>, it is a conventional convolution.
     * If <b>group</b> is <b>inChannel</b>, it is depthwiseConv2d. In this case, group==in_channel==out_channel.
     * If <b>group</b> is greater than <b>1</b> and less than <b>inChannel</b>, it is group convolution.\n
     * In this case, out_channel==group.
     */
    long group;
    /** Number of input channels. */
    long inChannel;
    /** Number of output channels. */
    long outChannel;
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};
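
/*
 * Informative example (not part of the IDL): the group rules documented above
 * distinguish three convolution kinds. A C++ sketch of the classification,
 * with illustrative names only.
 *
 *     #include <cstdint>
 *
 *     enum class ConvKind { Conventional, Depthwise, Group, Invalid };
 *
 *     ConvKind ClassifyConv(int64_t group, int64_t inChannel)
 *     {
 *         if (group == 1) return ConvKind::Conventional;       // ordinary conv2d
 *         if (group == inChannel) return ConvKind::Depthwise;  // depthwiseConv2d
 *         if (group > 1 && group < inChannel && inChannel % group == 0) {
 *             return ConvKind::Group;                          // group convolution
 *         }
 *         return ConvKind::Invalid;  // inChannel/group must be an integer
 *     }
 */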

/**
 * @brief Calculates a 2D transposed convolution on a 4D tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_CONV2D_TRANSPOSE_FUSION</b>.
 *
 * When padMode==PAD_MODE_PAD, <b>padList</b> must be greater than or equal to <b>0</b>.\n
 * In other cases, <b>padList</b> must be <b>0</b>.
 *
 * Input:
 *
 * * <b>x</b>, a 4D tensor in NHWC format.
 * * <b>weight</b>, a convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
 *   The value of <b>inChannel</b> divided by <b>group</b> must be an integer.
 * * <b>bias</b>: bias of the convolution. It is an array with a length of <b>[outChannel]</b>.\n
 *   In the quantization scenario, <b>bias</b> does not need to be quantized.
 *   If quantization is required, the input data must be of the int32 type.
 *   The actual quantization parameter is determined by <b>x</b> and <b>weight</b>.
 *
 * Output:
 *
 * * An n-dimensional tensor.
 *
 * @since 3.2
 * @version 2.0
 */
struct Conv2dTransposeFusion
{
    /** Size (height and width) of the convolution kernel. */
    long[] kernelSize;
    /** Movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth]\n
      * with length of 2.
      */
    long[] stride;
    /** Dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth]\n
      * with length of 2.
      * The value must be greater than or equal to <b>1</b> and cannot exceed the height and width of <b>x</b>.
      */
    long[] dilation;
    /** Padding mode. For details, see {@link PadMode}. */
    enum PadMode padMode;
    /** Padding around the input <b>x</b> in the height and width directions. It is an int array [top, bottom, left, right]\n
      * with length of 4. */
    long[] padList;
    /**
     * Splits <b>x</b> into groups by <b>inChannel</b>.
     * If <b>group</b> is <b>1</b>, it is a conventional convolution.
     * If <b>group</b> is greater than <b>1</b> and less than or equal to <b>inChannel</b>, this is a group convolution.
     */
    long group;
    /** Number of input channels. */
    long inChannel;
    /** Number of output channels. */
    long outChannel;
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
    /**
     * An integer array with two elements, specifying the paddings along the height and width of the output tensor.
     */
    long[] outputPaddings;
};

/**
 * @brief Divides the first tensor by the second tensor element-wise.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_DIV_FUSION</b>.
 *
 * Input:
 *
 * * <b>x1</b>, a tensor of the int or float type.
 * * <b>x2</b>, a tensor of the int or float type.
 *
 * Output:
 *
 * * Quotient of the two inputs.
 *
 * @since 3.2
 * @version 2.0
 */
struct DivFusion
{
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Performs an element-wise operation.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_ELTWISE</b>.
 *
 * Input:
 *
 * * <b>x1</b>, the first input tensor.
 * * <b>x2</b>, the second input tensor.
 *
 * Output:
 *
 * * A tensor with the same data type and shape as <b>x1</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct Eltwise
{
    /** Element-wise operation type. For details, see {@link EltwiseMode}. */
    enum EltwiseMode mode;
};

/**
 * @brief Adds an additional dimension to a tensor at the given axis.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_EXPAND_DIMS</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>axis</b>: index of the dimension to be added. The value is of the int32_t type and must be a constant\n
 *   in the range [-dim-1, dim].
 *
 * Output:
 *
 * * Tensor with an additional dimension at the given axis.
 *
 * @since 3.2
 * @version 2.0
 */
struct ExpandDims
{
};

/**
 * @brief Creates a tensor of the specified dimensions and fills it with a scalar.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_FILL</b>.
 *
 * Input:
 *
 * * <b>value</b>: scalar used to fill the tensor.
 * * <b>shape</b>, which specifies the dimensions of the tensor to create.
 *
 * Output:
 *
 * * Tensor filled by the scalar.
 *
 * @since 3.2
 * @version 2.0
 */
struct Fill
{
};

/**
 * @brief Applies a full connection to the input data.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_FULL_CONNECTION</b>.
 *
 * When <b>useAxis</b> is <b>true</b>, <b>axis</b> must be set. When <b>useAxis</b> is <b>false</b>,\n
 * <b>axis</b> is <b>0</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>weight</b>: weight tensor for a full connection.
 * * <b>bias</b>, a full-connection bias. In quantization scenarios, a quantized parameter is not required.\n
 *   If quantization is required, the data must be of the int32 type. The actual quantization parameter is\n
 *   determined by <b>x</b> and <b>weight</b>.
 *
 * Output:
 *
 * * <b>output</b>: computed tensor.
 *
 * @since 3.2
 * @version 2.0
 */
struct FullConnection
{
    /** Whether to use the bias. */
    boolean hasBias;
    /** Whether to use the axis. */
    boolean useAxis;
    /** Axis specified for the full connection. The specified axis and its following axes are converted into\n
      * a 1D tensor, and then the full connection is applied, as shown in the sketch after this structure.
      */
    long axis;
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};
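
/*
 * Informative example (not part of the IDL): how the axis flattening described
 * above changes the shape. A C++ sketch with illustrative names only.
 *
 *     #include <cstdint>
 *     #include <functional>
 *     #include <numeric>
 *     #include <vector>
 *
 *     // Collapses shape[axis..] into one dimension, as done before the
 *     // full connection is applied. E.g. [2, 3, 4, 5] with axis = 1 -> [2, 60].
 *     std::vector<int64_t> FlattenFromAxis(const std::vector<int64_t>& shape, int64_t axis)
 *     {
 *         std::vector<int64_t> out(shape.begin(), shape.begin() + axis);
 *         out.push_back(std::accumulate(shape.begin() + axis, shape.end(),
 *                                       int64_t{1}, std::multiplies<int64_t>()));
 *         return out;
 *     }
 */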

/**
 * @brief Performs batch normalization for a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_FUSED_BATCH_NORM</b>.
 *
 * Input:
 *
 * * <b>x</b>: a tensor of shape [N, ..., C], that is, the last dimension is the number of channels.
 * * <b>scale</b>: 1D tensor of the scaling factor used to scale the first normalized tensor.
 * * <b>offset</b>: 1D tensor of the offset added to the first normalized tensor.
 * * <b>mean</b>: 1D tensor of the overall mean value. It is used only for inference. In case of training,\n
 *   this parameter must be left empty.
 * * <b>variance</b>: 1D tensor used for the overall variance. It is used only for inference. In case of training,\n
 *   this parameter must be left empty.
 *
 * Output:
 *
 * * <b>output</b>: computed tensor.
 *
 * @since 3.2
 * @version 2.0
 */
struct FusedBatchNorm
{
    /** A small value close to zero. It is used to ensure that the divisor is not 0. */
    float epsilon;
};

/**
 * @brief Returns the slice of the input tensor based on the specified index and axis.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_GATHER</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>inputIndices</b>, indices of elements of the original tensor. The value is an array of the int type\n
 *   and must be in the range [0, x.shape[axis]).
 * * <b>axis</b>, the axis on which <b>x</b> is sliced. The value is an array with one element of the int32_t type.
 *
 * Output:
 *
 * * Sliced tensor.
 *
 * @since 3.2
 * @version 2.0
 */
struct Gather
{
};

/**
 * @brief Applies layer normalization for a tensor from the specified axis.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_LAYER_NORM_FUSION</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>gamma</b>, an m-dimensional tensor. The dimensions of <b>gamma</b> must be the same as the shape of the part\n
 *   of the input tensor to normalize.
 * * <b>beta</b>, an m-dimensional tensor with the same shape as <b>gamma</b>.
 *
 * Output:
 *
 * * An n-dimensional tensor, with the same data type and shape as the input tensor.
 *
 * @since 3.2
 * @version 2.0
 */
struct LayerNormFusion
{
    /** Start axis of <b>x</b> to apply layer normalization. */
    long beginNormAxis;
    /** A value added to the denominator for numerical stability. */
    float epsilon;
    /** Whether to perform an element-wise operation. */
    boolean elementwiseAffine;
    /** Start axis of the parameter input (gamma, beta) to apply layer normalization.\n
      * The value must be in the range [-n, n).
      */
    long beginParamsAxis;
};

/**
 * @brief Calculates the result of <b>x1</b> <= <b>x2</b> element-wise.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_LESS_EQUAL</b>.
 *
 * Input:
 *
 * * <b>x1</b>, which can be a number, a Boolean value, or a tensor whose data type is number or Boolean.
 * * <b>x2</b>, which can be a number or a Boolean value if <b>x1</b> is a tensor; or a tensor with the data type\n
 *   of number or Boolean if <b>x1</b> is not a tensor.
 *
 * Output:
 *
 * * A tensor with the data type of Boolean. When a quantization model is used, the quantization parameters of\n
 *   the output cannot be omitted. However, values of the quantization parameters do not affect the result.
 *
 * @since 3.2
 * @version 2.0
 */
struct LessEqual
{
};

/**
 * @brief Calculates the matrix product of <b>x1</b> and <b>x2</b>.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_MATMUL_FUSION</b>.
 *
 * Input:
 *
 * * <b>x1</b>, an n-dimensional tensor, whose data type can be number or Boolean.
 * * <b>x2</b>, an n-dimensional tensor, whose data type can be number or Boolean.
 *
 * Output:
 *
 * * Matrix product of the inputs. When type != DATA_TYPE_UNKNOWN, the data type of the output is determined by\n
 *   <b>type</b>. When type==DATA_TYPE_UNKNOWN,\n
 *   the data type of the output depends on the data type converted during the calculation of <b>x1</b> and <b>x2</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct MatMulFusion
{
    /** Whether to transpose the <b>x1</b> matrix. */
    boolean transposeA;
    /** Whether to transpose the <b>x2</b> matrix. */
    boolean transposeB;
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Calculates the maximum of <b>x1</b> and <b>x2</b> element-wise. The inputs of <b>x1</b> and <b>x2</b>\n
 * comply with the implicit type conversion rules to make the data types consistent.
 *
 * The input must be two tensors or one tensor and one scalar. When the input is two tensors, the data types\n
 * cannot be Boolean at the same time, and their shapes can be broadcast to the same size. When the inputs are\n
 * one tensor and one scalar, the scalar must be a constant.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_MAXIMUM</b>.
 *
 * Input:
 *
 * * <b>x1</b>, an n-dimensional tensor, whose data type can be number or Boolean.
 * * <b>x2</b>, an n-dimensional tensor, whose data type can be number or Boolean.
 *
 * Output:
 *
 * * Maximum value of the elements of the two tensors.
 *
 * @since 3.2
 * @version 2.0
 */
struct Maximum
{
};

/**
 * @brief Applies a 2D maximum pooling over an input tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_MAX_POOL_FUSION</b>.
 *
 * When padMode==PAD_MODE_PAD, <b>pad</b> must be greater than or equal to <b>0</b>.\n
 * In other cases, <b>pad</b> must be <b>0</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Output:
 *
 * * Tensor after maximum pooling.
 *
 * @since 3.2
 * @version 2.0
 */
struct MaxPoolFusion
{
    /** Size of the kernel used to take the maximum value. It is an int array [kernel_height, kernel_width]\n
      * with length of 2.
      */
    long[] kernelSize;
    /** Distance of kernel moving. It is an int array with two elements. */
    long[] strides;
    /** Array to pad. */
    long[] pad;
    /** Padding mode. For details, see {@link PadMode}. */
    enum PadMode padMode;
    /** Format of the tensor data. For details, see {@link Format}. */
    enum Format format;
    /** Rounding mode. For details, see {@link RoundMode}. */
    enum RoundMode roundMode;
    /** Whether to do global pooling. */
    boolean global;
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Multiplies the elements in the same position of <b>x1</b> and <b>x2</b> to obtain the output.
 *
 * If the shapes of <b>x1</b> and <b>x2</b> are different, expand <b>x1</b> and <b>x2</b> to the same shape through\n
 * broadcast and then perform the multiplication.
 * The {@link NodeType} of this operator is <b>NODE_TYPE_MUL_FUSION</b>.
 *
 * Input:
 *
 * * <b>x1</b>, a tensor of the int or float type.
 * * <b>x2</b>, a tensor of the int or float type.
 *
 * Output:
 *
 * * Product of each element of <b>x1</b> and <b>x2</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct MulFusion
{
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Generates a one-hot tensor based on the specified locations.
 *
 * The locations specified by <b>indices</b> are determined by <b>on_value</b>, and other locations are determined\n
 * by <b>off_value</b>.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_ONE_HOT</b>.
 *
 * Input:
 *
 * * <b>indices</b>, an n-dimensional tensor. Each element in <b>indices</b> determines the location of <b>on_value</b>\n
 *   in each one-hot vector.
 * * <b>depth</b>, an integer scalar that determines the depth of the one-hot vector. The value of <b>depth</b> must be\n
 *   greater than <b>0</b>.
 * * <b>on_value</b>, a scalar that specifies a valid value in the one-hot vector.
 * * <b>off_value</b>, a scalar that specifies the values of other locations in the one-hot vector except the valid value.
 *
 * Output:
 *
 * * An (n+1)-dimensional tensor if <b>indices</b> is an n-dimensional tensor. The output shape is determined by\n
 *   <b>indices</b> and <b>axis</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct OneHot
{
    /**
     * An integer scalar that specifies the dimension for inserting the one-hot.
     * Assume that the shape of <b>indices</b> is [N, C],\n
     * and the value of <b>depth</b> is D.
     * When <b>axis</b> is <b>0</b>, the shape of the output is [D, N, C].
     * When <b>axis</b> is <b>1</b>, the shape of the output is [N, D, C].
     * When <b>axis</b> is <b>-1</b>, the shape of the output is [N, C, D].
     * See the sketch after this structure.
     */
    long axis;
};
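
/*
 * Informative example (not part of the IDL): the axis cases above amount to
 * inserting <b>depth</b> into the indices shape. A C++ sketch, illustrative only.
 *
 *     #include <cstdint>
 *     #include <vector>
 *
 *     // With indices shape [N, C] and depth D: axis 0 -> [D, N, C],
 *     // axis 1 -> [N, D, C], axis -1 -> [N, C, D].
 *     std::vector<int64_t> OneHotShape(std::vector<int64_t> indicesShape,
 *                                      int64_t depth, int64_t axis)
 *     {
 *         const int64_t n = static_cast<int64_t>(indicesShape.size());
 *         if (axis < 0) axis += n + 1;  // -1 means inserting at the end
 *         indicesShape.insert(indicesShape.begin() + axis, depth);
 *         return indicesShape;
 *     }
 */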

/**
 * @brief Pads the input tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_PAD_FUSION</b>.
 *
 * When paddingMode==PADDING_MODE_CONSTANT, <b>constantValue</b> must be set.
 * The default value of <b>constantValue</b> is <b>0</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>paddings</b>, a 2D tensor that specifies the length to add in each dimension.\n
 *   The shape is [n, 2]. <b>paddings[i][0]</b> indicates the number of paddings to add before the input\n
 *   tensor in the ith dimension.
 *   <b>paddings[i][1]</b> indicates the number of paddings to add after the input tensor in the ith dimension.
 *
 * Output:
 *
 * * An n-dimensional tensor after padding, with the same dimensions and data type as <b>x</b>.\n
 *   The shape is determined by <b>x</b> and <b>paddings</b>:
 *   output.shape[i] = input.shape[i] + paddings[i][0] + paddings[i][1]
 *
 * @since 3.2
 * @version 2.0
 */
struct PadFusion
{
    /**
     * A 2D tensor, specifying the length to add in each dimension. The shape is [n, 2]. <b>paddings[i][0]</b>
     * indicates the number of paddings to add before the input <b>x</b> in the ith dimension.
     * <b>paddings[i][1]</b> indicates the number of paddings to add after the input <b>x</b> in the ith dimension.
     * The meaning of this parameter is the same as that of the <b>paddings</b> input.
     */
    long[][] paddings;
    /**
     * Padding mode.
     * For details, see {@link PaddingMode}.
     */
    enum PaddingMode paddingMode;
    /**
     * A constant with the same data type as <b>x</b>. It specifies the value to add in the pad operation.
     * This parameter is valid only when paddingMode==PADDING_MODE_CONSTANT. The default value is <b>0</b>.
     */
    float constantValue;
};
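
/*
 * Informative example (not part of the IDL): the output-shape rule
 * output.shape[i] = input.shape[i] + paddings[i][0] + paddings[i][1],
 * as a C++ sketch with illustrative names only.
 *
 *     #include <array>
 *     #include <cstdint>
 *     #include <vector>
 *
 *     std::vector<int64_t> PadShape(const std::vector<int64_t>& in,
 *                                   const std::vector<std::array<int64_t, 2>>& paddings)
 *     {
 *         std::vector<int64_t> out(in.size());
 *         for (size_t i = 0; i < in.size(); ++i) {
 *             out[i] = in[i] + paddings[i][0] + paddings[i][1];
 *         }
 *         return out;  // e.g. [2, 3] with paddings [[1, 1], [0, 2]] -> [4, 5]
 *     }
 */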

/**
 * @brief Calculates the <b>y</b> power of each element in <b>x</b>. The inputs must be two tensors or one tensor
 * and one scalar.
 *
 * When the inputs are two tensors, their data types cannot be Boolean at the same time, and their shapes
 * must be the same.
 * When the inputs are one tensor and one scalar, the scalar must be a constant.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_POW_FUSION</b>.
 *
 * The <b>x' = scale*x+shift</b> operation is performed for each element of <b>x</b>, and then the <b>y</b> power of
 * <b>x'</b> is calculated.
 *
 * Input:
 *
 * * <b>x</b>, a number, a Boolean value, or a tensor whose data type is number or Boolean.
 * * <b>y</b>, a number, a Boolean value, or a tensor whose data type is number or Boolean.
 *
 * Output:
 *
 * * A tensor, whose shape is determined by the shape of <b>x</b> and <b>y</b> after broadcasting.
 *
 * @since 3.2
 * @version 2.0
 */
struct PowFusion
{
    /** Scale the value of <b>x</b>. */
    float scale;
    /** Increase or decrease the value of <b>x</b> after scaling. */
    float shift;
};

/**
 * @brief Applies the PReLU activation of <b>x</b> and <b>weight</b>.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_PRELU_FUSION</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor. If <b>n</b> is greater than or equal to 2, <b>x</b> must be\n
 *   [BatchSize, ..., Channels]. The second dimension is the number of channels.
 * * <b>weight</b>, a 1D tensor. The length of <b>weight</b> must be 1 or equal to the number of channels.\n
 *   If the length of <b>weight</b> is 1, all channels share the same weight.
 *   If the length of <b>weight</b> is equal to the number of channels, each channel exclusively has a weight.
 *   If <b>n</b> of <b>x</b> is less than 2, the <b>weight</b> length must be 1.
 *
 * Output:
 *
 * * PReLU activation value of <b>x</b>, with the same shape and data type as <b>x</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct PReLUFusion
{
    /**
     * Whether to enable weight sharing for the parameter validity check.
     * If the length of <b>weight</b> is <b>1</b>, <b>channelShared</b> must be <b>true</b>.
     * Otherwise, <b>channelShared</b> is <b>false</b>.
     */
    boolean channelShared;
};

/**
 * @brief Converts the data type.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_QUANT_DTYPE_CAST</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Output:
 *
 * * Tensor after the data type conversion.
 *
 * @since 3.2
 * @version 2.0
 */
struct QuantDTypeCast
{
    /** Data type of the input tensor. */
    long srcT;
    /** Data type of the output tensor. */
    long dstT;
};

/**
 * @brief Reduces the dimensions of a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_REDUCE_FUSION</b>.
 * If <b>mode</b> is <b>REDUCE_ALL</b>, <b>REDUCE_PROD</b>, or <b>REDUCE_MEAN</b> and <b>reduceToEnd</b>
 * is <b>true</b>, the output is the reduced value multiplied by <b>coeff</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor, where <b>n</b> is less than 8.
 * * <b>axis</b>, a 1D tensor that specifies the dimension to reduce. The value range of each element in axis\n
 *   is [-n, n).
 *
 * Output:
 *
 * * An m-dimensional tensor, with the same data type as <b>x</b>. If <b>keepDims</b> is <b>false</b>, m < n.\n
 *   If <b>keepDims</b> is <b>true</b>, m==n.
 *
 * @since 3.2
 * @version 2.0
 */
struct ReduceFusion
{
    /** Whether to keep the dimensions unchanged. */
    boolean keepDims;
    /** Algorithm used to reduce the tensor dimensions. For details, see {@link ReduceMode}. */
    enum ReduceMode mode;
    /**
     * If this parameter is set to <b>true</b>, the first element is obtained from axis and set to <b>i</b>,
     * and then <b>axis</b> will be changed to [i, i+1, ..., n-2, n-1].
     * For example, if <b>reduceToEnd</b> is <b>true</b>, <b>axis</b> is [2,4], and the number of dimensions of
     * <b>x</b> is 7, then <b>axis</b> will be [2,3,4,5,6]. See the sketch after this structure.
     */
    boolean reduceToEnd;
    /** Coefficient. */
    float coeff;
};
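
/*
 * Informative example (not part of the IDL): the reduceToEnd axis expansion
 * described above, as a C++ sketch with illustrative names only.
 *
 *     #include <cstdint>
 *     #include <numeric>
 *     #include <vector>
 *
 *     // Keeps the first axis element i and extends the list to [i, n).
 *     // E.g. axis = [2, 4] with n = 7 dimensions -> [2, 3, 4, 5, 6].
 *     std::vector<int64_t> ExpandAxisToEnd(const std::vector<int64_t>& axis, int64_t n)
 *     {
 *         std::vector<int64_t> out(static_cast<size_t>(n - axis[0]));
 *         std::iota(out.begin(), out.end(), axis[0]);
 *         return out;
 *     }
 */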

/**
 * @brief Reshapes a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_RESHAPE</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>InputShape</b>, a 1D tensor that specifies the shape of the output tensor. It must be a constant.
 *
 * Output:
 *
 * * A tensor of the specified shape. The data type is the same as that of <b>x</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct Reshape
{
};

/**
 * @brief Resizes a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_RESIZE</b>.
 *
 * The parameter combination of this operator can implement the <b>Resize</b> function.
 * For example, to implement bilinear interpolation with the four corners of the image precisely aligned, set:
 * method = RESIZE_METHOD_LINEAR
 * coordinateTransformMode = COORDINATE_TRANSFORM_MODE_ALIGN_CORNERS
 *
 * Input:
 *
 * * <b>x</b>, a 4D tensor in the [batchSize, height, width, channels] (NHWC) format.
 *
 * Output:
 *
 * * An n-dimensional tensor, with the same shape and data type as <b>x</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct Resize
{
    /** Method used for resizing. For details, see {@link ResizeMethod}. */
    enum ResizeMethod method;
    /** Height of the 4D tensor after resizing. */
    long newHeight;
    /** Width of the 4D tensor after resizing. */
    long newWidth;
    /** Whether to maintain the height/width ratio of <b>x</b> after resizing. */
    boolean preserveAspectRatio;
    /**
     * Coordinate transformation method. For details, see {@link CoordinateTransformMode}.
     */
    enum CoordinateTransformMode coordinateTransformMode;
    /** Cubic coefficient, used when <b>method</b> is <b>RESIZE_METHOD_CUBIC</b>. */
    float cubicCoeff;
    /** When excludeOutside==1, the sampling weight that exceeds the boundary of <b>x</b> is set to <b>0</b>,
      * and other weights are normalized.
      */
    long excludeOutside;
    /** Value to interpolate, which is used only when <b>x</b> is cropped. The sampling weight that exceeds the
      * boundary is set to <b>extrapolationValue</b>.
      */
    float extrapolationValue;
    /** Nearest neighbor interpolation algorithm, used when <b>method</b> is <b>RESIZE_METHOD_NEAREST</b>.
      * For details, see {@link NearestMode}.
      */
    enum NearestMode nearestMode;
};
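
/*
 * Informative example (not part of the IDL): under the standard ALIGN_CORNERS
 * definition (assumed here; the driver's exact arithmetic may differ), the four
 * corners of the input map exactly onto the corners of the output. A C++ sketch:
 *
 *     #include <cstdint>
 *
 *     // Source coordinate sampled for destination index `dst`.
 *     double AlignCornersSrcCoord(int64_t dst, int64_t srcLen, int64_t dstLen)
 *     {
 *         if (dstLen == 1) return 0.0;
 *         return static_cast<double>(dst) * (srcLen - 1) / (dstLen - 1);
 *     }
 */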

/**
 * @brief Calculates the reciprocal of the square root of a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_RSQRT</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor, where <b>n</b> is less than 8. Each element of the tensor cannot be less than 0.
 *
 * Output:
 *
 * * An n-dimensional tensor, with the same shape and data type as <b>x</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct Rsqrt
{
};

/**
 * @brief Scales a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SCALE_FUSION</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>scale</b>, the scaling tensor.
 * * <b>bias</b>, the bias tensor.
 *
 * Output:
 *
 * * A scaled n-dimensional tensor, whose data type is the same as that of <b>x</b> and whose shape is determined\n
 *   by <b>axis</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct ScaleFusion
{
    /** Dimensions to scale. */
    long axis;
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Returns the shape of the input tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SHAPE</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Output:
 *
 * * An integer array representing the dimensions of <b>x</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct Shape
{
};

/**
 * @brief Slices a tensor of the specified size.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SLICE_FUSION</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>begin</b>, an array of integers greater than or equal to 0, specifying the start of the slice.
 * * <b>size</b>, an array of integers greater than or equal to 0, specifying the length of the slice.\n
 *   For each dimension <b>i</b>, 1<=size[i]<=input.shape[i]-begin[i].
 *
 * Output:
 *
 * * An n-dimensional tensor obtained.
 *
 * @since 3.2
 * @version 2.0
 */
struct SliceFusion
{
    /** Dimensions on which the tensor is sliced. */
    long[] axes;
};

/**
 * @brief Applies the <b>softmax</b> operation on a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SOFTMAX</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Output:
 *
 * * Result of the <b>softmax</b> operation. It is an n-dimensional tensor with the same data type and shape\n
 *   as <b>x</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct Softmax
{
    /** Dimensions on which the <b>softmax</b> operation is performed. It is an integer in the range [-n, n). */
    long[] axis;
};

/**
 * @brief Splits a 4D tensor into multiple blocks in the spatial dimension and then concatenates these blocks
 * in the batch dimension.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SPACE_TO_BATCH_ND</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Output:
 *
 * A 4D tensor with the same data type as <b>x</b>. The shape is determined by <b>input</b>, <b>blockShape</b>,
 * and <b>paddings</b>. Assume that the input shape is [n,c,h,w], then:
 * \f$ output.shape[0] = n * blockShape[0] * blockShape[1] \f$<br>
 * \f$ output.shape[1] = c \f$<br>
 * \f$ output.shape[2] = (h + paddings[0][0] + paddings[0][1]) / blockShape[0] \f$<br>
 * \f$ output.shape[3] = (w + paddings[1][0] + paddings[1][1]) / blockShape[1] \f$<br>
 * \f$ h + paddings[0][0] + paddings[0][1] \f$ must be an integer multiple of \f$ blockShape[0] \f$, and
 * \f$ w + paddings[1][0] + paddings[1][1] \f$ must be an integer multiple of \f$ blockShape[1] \f$.
 *
 * @since 3.2
 * @version 2.0
 */
struct SpaceToBatchND
{
    /** Number of blocks. The value must be greater than 1. */
    long[] blockShape;
    /** Padding size for spatial dimensions. */
    long[][] paddings;
};
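
/*
 * Informative example (not part of the IDL): the output-shape formulas above,
 * as a C++ sketch for an (n, c, h, w) input; names are illustrative only.
 *
 *     #include <array>
 *     #include <cstdint>
 *
 *     // The padded height and width must be integer multiples of
 *     // blockShape[0] and blockShape[1], respectively.
 *     std::array<int64_t, 4> SpaceToBatchShape(const std::array<int64_t, 4>& in,
 *                                              const std::array<int64_t, 2>& blockShape,
 *                                              const int64_t paddings[2][2])
 *     {
 *         return {
 *             in[0] * blockShape[0] * blockShape[1],
 *             in[1],
 *             (in[2] + paddings[0][0] + paddings[0][1]) / blockShape[0],
 *             (in[3] + paddings[1][0] + paddings[1][1]) / blockShape[1]
 *         };
 *     }
 */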

/**
 * @brief Splits a tensor into multiple tensors along the axis dimension. The number of tensors is
 * specified by <b>outputNum</b>.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SPLIT</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Output:
 *
 * * An array of n-dimensional tensors, with the same data type and dimensions.\n
 *   The data type of each tensor is the same as that of <b>x</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct Split
{
    /** Number of output tensors. */
    long outputNum;
    /**
     * Size of each tensor to output.
     * If <b>size_splits</b> is empty, <b>x</b> will be evenly split into tensors of the same size.
     * In this case, x.shape[axis] must be an integer multiple of <b>outputNum</b>.
     * If <b>size_splits</b> is not empty, the sum of all elements must be equal to x.shape[axis].
     */
    long[] sizeSplits;
    /** Target axis on which <b>x</b> is split. The data type is int. */
    long axis;
};
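
/*
 * Informative example (not part of the IDL): resolving the per-output sizes
 * from sizeSplits as described above. A C++ sketch, illustrative names only.
 *
 *     #include <cstdint>
 *     #include <numeric>
 *     #include <vector>
 *
 *     // An empty sizeSplits means an even split into outputNum parts
 *     // (dimLen must then be an integer multiple of outputNum); otherwise
 *     // the elements must sum to dimLen, i.e. x.shape[axis].
 *     std::vector<int64_t> ResolveSplitSizes(int64_t dimLen, int64_t outputNum,
 *                                            std::vector<int64_t> sizeSplits)
 *     {
 *         if (sizeSplits.empty()) {
 *             return std::vector<int64_t>(outputNum, dimLen / outputNum);
 *         }
 *         int64_t sum = std::accumulate(sizeSplits.begin(), sizeSplits.end(), int64_t{0});
 *         return sum == dimLen ? sizeSplits : std::vector<int64_t>{};  // empty == invalid
 *     }
 */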

/**
 * @brief Calculates the square root of a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SQRT</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Output:
 *
 * * An n-dimensional tensor, with the same data type and shape as <b>x</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct Sqrt
{
};

/**
 * @brief Calculates the square of the difference between two tensors.
 * The <b>SquaredDifference</b> operator supports subtraction between tensors.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SQUARED_DIFFERENCE</b>.
 *
 * Input:
 *
 * * <b>x</b>, a tensor representing the minuend, which can be a number or a Boolean value.
 * * <b>y</b>, a tensor representing the subtrahend, which can be a number or a Boolean value.
 *
 * Output:
 *
 * * A tensor obtained. The shape of the output is determined by <b>x</b> and <b>y</b>.\n
 *   If <b>x</b> and <b>y</b> are of the same shape, the output shape is the same as that of <b>x</b> and <b>y</b>.
 *   If <b>x</b> and <b>y</b> are of different shapes, you need to perform the broadcast operation on
 *   <b>x</b> and <b>y</b> first.
 *   The precision of the output is the same as the input tensor with higher precision.
 *
 * @since 3.2
 * @version 2.0
 */
struct SquaredDifference
{
};

/**
 * @brief Removes the dimension with length of 1 from the specified axis. The int8 quantization input is supported.
 *
 * Assume that the shape of <b>x</b> is [2, 1, 1, 2, 2] and <b>axis</b> is [0,1], the shape of the output tensor\n
 * must be [2, 1, 2, 2]. That is, the dimension with the length of 1 between the 0th and 1st dimensions is removed.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SQUEEZE</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Output:
 *
 * * Tensor obtained.
 *
 * @since 3.2
 * @version 2.0
 */
struct Squeeze
{
    /** Axis on which the dimension of length 1 is to be removed. The value can be an integer or an array.
      * The value range of the integer is [-n, n).
      */
    long[] axis;
};
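
/*
 * Informative example (not part of the IDL): common squeeze shape semantics,
 * assuming each axis entry indexes a dimension of length 1. A C++ sketch,
 * illustrative names only.
 *
 *     #include <algorithm>
 *     #include <cstdint>
 *     #include <vector>
 *
 *     // E.g. in = [2, 1, 1, 2, 2] with axis = [1] -> [2, 1, 2, 2].
 *     std::vector<int64_t> SqueezeShape(const std::vector<int64_t>& in,
 *                                       std::vector<int64_t> axis)
 *     {
 *         const int64_t n = static_cast<int64_t>(in.size());
 *         for (int64_t& a : axis) {
 *             if (a < 0) a += n;  // normalize negative axes into [0, n)
 *         }
 *         std::vector<int64_t> out;
 *         for (int64_t i = 0; i < n; ++i) {
 *             bool drop = in[i] == 1 &&
 *                 std::find(axis.begin(), axis.end(), i) != axis.end();
 *             if (!drop) out.push_back(in[i]);
 *         }
 *         return out;
 *     }
 */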

/**
 * @brief Stacks multiple tensors along the specified axis. If the number of dimensions of each tensor is <b>n</b>
 * before stacking, the number of dimensions of the output tensor is <b>n</b>+1.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_STACK</b>.
 *
 * Input:
 *
 * * Multiple n-dimensional tensors, which are of the same data type and shape.
 *
 * Output:
 *
 * * An (n+1)-dimensional tensor stacked along the specified axis, with the same data type and precision as the
 *   input tensors.
 *
 * @since 3.2
 * @version 2.0
 */
struct Stack
{
    /** An integer that specifies the dimension for tensor stacking. The value range is [-(n+1),(n+1)),
      * which means a negative number is allowed.
      */
    long axis;
};

/**
 * @brief Slices a tensor at the specified intervals.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_STRIDED_SLICE</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>begin</b>, a 1D tensor that specifies the position from which the tensor is sliced.\n
 *   The length of <b>begin</b> is <b>n</b>. begin[i] specifies the start point to slice in the ith dimension.
 * * <b>end</b>, a 1D tensor that specifies the end to which the tensor is sliced. The length of <b>end</b> is <b>n</b>.\n
 *   end[i] specifies the end point to slice in the ith dimension.
 * * <b>strides</b>, a 1D tensor that specifies the intervals for slicing. The length of <b>strides</b> is <b>n</b>.\n
 *   strides[i] specifies the intervals at which the tensor is sliced in the ith dimension. Negative values are allowed.
 *
 * For the input tensor, the shapes of <b>begin</b>, <b>end</b>, and <b>strides</b> must be the same.
 * The indices of <b>begin</b> and <b>end</b> start from <b>0</b>. The elements of <b>strides</b> cannot be <b>0</b>.
 *
 * Output:
 *
 * * A tensor, with the same data type as <b>x</b>. The number of dimensions of the output tensor is rank (x[0])+1.
 *
 * @since 3.2
 * @version 2.0
 */
struct StridedSlice
{
    /**
     * Mask of <b>begin</b>.
     * <b>beginMask</b> identifies different dimensions of <b>x</b> in binary code. For example, if bit i of <b>beginMask</b>
     * is set to <b>1</b>, the setting of <b>begin</b> in the ith dimension will be invalid, which means the start index of
     * that dimension is <b>0</b>. The default value is <b>0</b>.
     */
    long beginMask;
    /**
     * Mask of <b>end</b>. The parameter is similar to <b>beginMask</b>.
     * <b>endMask</b> identifies different dimensions of <b>x</b> in binary code. For example, if bit i of <b>endMask</b> is
     * set to <b>1</b>, the setting of <b>end</b> in the ith dimension will be invalid, which means the tensor will be
     * sliced till the end in that dimension. The default value is <b>0</b>.
     */
    long endMask;
    /**
     * An integer used to mask <b>begin</b> and <b>end</b>.
     * The tensor does not need to be sliced in the non-zero dimensions.
     * <b>ellipsisMask</b> is represented in binary code. If bit i of <b>ellipsisMask</b> is <b>1</b>, elements are sliced
     * from the first element at strides[i] in the ith dimension until the tensor boundary.
     */
    long ellipsisMask;
    /**
     * Used to add a dimension.
     * <b>newAxisMask</b> identifies different dimensions of <b>x</b> in binary code. If bit i is <b>1</b>, the settings of
     * <b>begin</b>, <b>end</b>, and <b>strides</b> are invalid for all dimensions, and a dimension with size of 1 is added
     * to bit i.
     */
    long newAxisMask;
    /**
     * Used to shrink the specified dimension.
     * <b>shrinkAxisMask</b> is represented in binary code. If the ith bit of the <b>shrinkAxisMask</b> is <b>1</b>, all
     * elements in the ith dimension will be discarded, and the length of the ith dimension is shrunk to <b>1</b>.
     */
    long shrinkAxisMask;
};
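
/*
 * Informative example (not part of the IDL): how beginMask and endMask are
 * interpreted per the field docs above. A C++ sketch, illustrative names only.
 *
 *     #include <cstdint>
 *     #include <vector>
 *
 *     // Bit i of beginMask makes begin[i] fall back to 0; bit i of endMask
 *     // makes end[i] fall back to the full dimension length.
 *     void ApplyMasks(int64_t beginMask, int64_t endMask,
 *                     const std::vector<int64_t>& dims,
 *                     std::vector<int64_t>& begin, std::vector<int64_t>& end)
 *     {
 *         for (size_t i = 0; i < dims.size(); ++i) {
 *             if (beginMask & (int64_t{1} << i)) begin[i] = 0;
 *             if (endMask & (int64_t{1} << i)) end[i] = dims[i];
 *         }
 *     }
 */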

/**
 * @brief Calculates the difference between two tensors.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SUB_FUSION</b>.
 *
 * Input:
 *
 * * <b>x</b>, the minuend, which is a tensor of the int or float type.
 * * <b>y</b>, the subtrahend, which is a tensor of the int or float type.
 *
 * Output:
 *
 * * Difference between the two tensors. The output shape is determined by <b>x</b> and <b>y</b>.\n
 *   If <b>x</b> and <b>y</b> are of the same shape, the output tensor has the same shape as <b>x</b> and <b>y</b>.
 *   If <b>x</b> and <b>y</b> are of different shapes, perform the broadcast operation on <b>x</b> or <b>y</b> first.
 *   The precision of the output is the same as the input tensor with higher precision.
 *
 * @since 3.2
 * @version 2.0
 */
struct SubFusion
{
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Copies a tensor the specified number of times.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_TILE_FUSION</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>multiples</b>, a 1D tensor that specifies the number of times that the input tensor is copied\n
 *   in each dimension.
 *   The length <b>m</b> is not less than the number of dimensions of <b>x</b>.
 *
 * Output:
 *
 * * An m-dimensional tensor, with the same data type as <b>x</b>. If the number of dimensions of <b>x</b> is the\n
 *   same as the length of <b>multiples</b>, the number of dimensions of the output tensor is the same as that of\n
 *   <b>x</b>, that is, an n-dimensional tensor is output.
 *   If the length of <b>multiples</b> is greater than <b>n</b>, <b>1s</b> are used to pad the dimensions of <b>x</b>.
 *   Then, <b>x</b> is copied the specified number of times in each dimension to obtain an m-dimensional tensor.
 *
 * @since 3.2
 * @version 2.0
 */
struct TileFusion
{
    /** A 1D tensor that specifies the number of times that data is copied in each dimension. The length <b>m</b> is not
      * less than the number of dimensions of <b>x</b>.
      */
    long[] dims;
};
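
/*
 * Informative example (not part of the IDL): the output shape of the tile
 * operation described above, assuming the 1s pad the leading dimensions as in
 * the common tile definition. A C++ sketch, illustrative names only.
 *
 *     #include <cstdint>
 *     #include <vector>
 *
 *     // E.g. in = [2, 3] with multiples = [2, 1, 4] -> [2, 2, 12].
 *     std::vector<int64_t> TileShape(std::vector<int64_t> in,
 *                                    const std::vector<int64_t>& multiples)
 *     {
 *         in.insert(in.begin(), multiples.size() - in.size(), int64_t{1});
 *         for (size_t i = 0; i < in.size(); ++i) {
 *             in[i] *= multiples[i];
 *         }
 *         return in;
 *     }
 */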

/**
 * @brief Obtains the first K maximum values along the axis and their indices.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_TOPK_FUSION</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Output:
 *
 * * <b>output0</b>, the first K maximum values in the axis dimension.
 * * <b>output1</b>, indices of the first K maximum values.
 *
 * @since 3.2
 * @version 2.0
 */
struct TopKFusion
{
    /** The value <b>true</b> means to sort the elements in descending order; the value <b>false</b> means
      * the opposite.
      */
    boolean sorted;
    /** Specified axis. */
    long axis;
};

/**
 * @brief Transposes a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_TRANSPOSE</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor to transpose.
 * * <b>perm</b>, a 1D tensor that specifies the permutation. Its length is the same as the number of dimensions of\n
 *   <b>x</b>.
 *
 * Output:
 *
 * * An n-dimensional tensor, which has the same data type and quantization parameters as <b>x</b>.\n
 *   The shape is determined by the shape of <b>x</b> and <b>perm</b>.
 *
 * @since 3.2
 * @version 2.0
 */
struct Transpose
{
};

/**
 * @brief Adds a dimension based on the value of <b>axis</b>.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_UNSQUEEZE</b>.
 *
 * Input:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Output:
 *
 * * Tensor output.
 *
 * @since 3.2
 * @version 2.0
 */
struct Unsqueeze
{
    /** Dimension to add. The value of <b>axis</b> can be an integer or an array of integers.
      * The value range of the integer is [-n, n).
      */
    long[] axis;
};

/** @} */