/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @addtogroup NNRt
 * @{
 *
 * @brief Provides a unified interface for AI chip drivers to access OpenHarmony.
 * Neural Network Runtime (NNRt) is a cross-chip inference computing runtime oriented to the AI field.
 * It serves as a bridge between the upper-layer AI inference framework and the underlying acceleration chip to implement cross-chip inference computing of AI models.
 * @since 3.2
 * @version 1.0
 */

/**
 * @file NodeAttrTypes.idl
 *
 * @brief Defines the parameters and functions of AI model operators.
 *
 * All structures in this file declare only operator attributes and do not contain the interfaces for executing operator functions.
 * - 1. The operators in the file are in one-to-one correspondence with a {@link NodeType}. In model inference, {@link NodeType} is stored in <b>nodeType</b> of {@link Node}.
 * - 2. Each operator has at least one input and one output. The input is the tensor received by the operator, and the output is the tensor obtained after the operator operation. The relationship between the input, operator, and output is determined by <b>inputIndex</b> and <b>outputIndex</b> of the {@link Node} structure.
 *
 * @since 3.2
 * @version 1.0
 */

/**
 * @brief Defines the package path of the NNRt module.
 *
 * @since 3.2
 * @version 1.0
 */
package ohos.hdi.nnrt.v1_0;

import ohos.hdi.nnrt.v1_0.NnrtTypes;

/**
 * @brief Defines the activation operator. All activation functions belong to this operator. The specific activation function type is determined by the parameters.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_ACTIVATION</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Outputs:
 *
 * * A tensor returned after the activation function is executed.
 *
 * @since 3.2
 * @version 1.0
 */
struct Activation
{
    /** Activation function type. */
    enum ActivationType activationType;
    /** Size factor, used for the <b>LeakyReLU</b> and <b>ELU</b> activation functions. */
    float alpha;
    /** Minimum value, used for the <b>HardTanh</b> activation function. */
    float minVal;
    /** Maximum value, used for the <b>HardTanh</b> activation function. */
    float maxVal;
    /** Whether to use the approximation algorithm. It is used for the <b>GELU</b> activation function. */
    boolean approximate;
};

/**
 * @brief Adds tensors.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_ADD_FUSION</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, the first input tensor.
 * * <b>y</b>, the second input tensor. The data type must be the same as that of the first tensor.
 *
 * Outputs:
 *
 * * Sum of the elements of <b>x</b> and <b>y</b>. The data shape is the same as the one after broadcasting, and the data type is the one with higher precision of the two inputs.
 * If <b>activationType</b> is configured, the specified activation function will be called before the output is returned.
 *
 * @since 3.2
 * @version 1.0
 */
struct AddFusion
{
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Returns the indices or values of the first K maximum values of a tensor along the specified axis.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_ARGMAX_FUSION</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, a tensor of shape <b>(N,*)</b>, where * indicates any number of additional dimensions.
 *
 * Outputs:
 *
 * * Indices or values of the first <b>K</b> maximum values of the input tensor along the axis.
 *
 * @since 3.2
 * @version 1.0
 */
struct ArgMaxFusion
{
    /** Target axis where the maximum indices or values are obtained. */
    long axis;
    /** First <b>K</b> maximum values on the axis. */
    long topK;
    /** Whether to keep the output dimensions the same as the input dimensions. */
    boolean keepDims;
    /** If the value is <b>false</b>, the index is returned. If the value is <b>true</b>, the value is returned. The default value is <b>false</b>. */
    boolean outMaxValue;
};

/**
 * @brief Applies a 2D average pooling on the input tensor. The int8 quantization input is supported.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_AVGPOOL_FUSION</b>.
 *
 * When padMode==PAD_MODE_PAD, the elements of <b>pad</b> must be greater than or equal to <b>0</b>. In other cases, <b>pad</b> must be <b>0</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Outputs:
 *
 * * Tensor after average pooling.
 *
 * @since 3.2
 * @version 1.0
 */
struct AvgPoolFusion
{
    /**
     * Kernel size used to obtain the average value. It is an int array [kernel_height, kernel_width] with length of 2.
     * The first number indicates the kernel height, and the second number indicates the kernel width.
     */
    long[] kernelSize;
    /**
     * Distance of kernel moving. The value is an int array [stride_height, stride_width] with length of 2.
     * The first number indicates the moving size in height, and the second number indicates the moving size in width.
     */
    long[] strides;
    /** <b>x</b> is padded with an int array [top, bottom, left, right] with length of 4, and the nearest neighbor values are used for padding. */
    long[] pad;
    /** Padding method. */
    enum PadMode padMode;
    /** Numerical operation mode of the output tensor. */
    enum RoundMode roundMode;
    /** Format of the data during calculation. For details, see {@link Format}. */
    enum Format format;
    /** Whether to do global pooling. */
    boolean global;
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Divides the batch dimension of a 4D tensor into small blocks by <b>blockShape</b>, and interleaves these blocks back into the spatial dimension.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_BATCH_TO_SPACE_ND</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Outputs:
 *
 * * Output tensor. Assume that the shape of <b>x</b> is (n,h,w,c) and the shape of output is (n',h',w',c'):
 * \f$ n' = n / (blockShape[0] * blockShape[1]) \f$<br>
 * \f$ h' = h * blockShape[0] - crops[0][0] - crops[0][1] \f$<br>
 * \f$ w' = w * blockShape[1] - crops[1][0] - crops[1][1] \f$<br>
 * \f$ c' = c \f$
 * (A C++ sketch of these formulas follows this structure.)
 *
 * @since 3.2
 * @version 1.0
 */
struct BatchToSpaceND
{
    /** Block size, which is an array [height_block, width_block] with length of 2. */
    long[] blockShape;
    /**
     * Crop values for the spatial dimension.
     * It is a 2D array [[crop0_start, crop0_end], [crop1_start, crop1_end]] with the shape of (2, 2).
     */
    long[][] crops;
};
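
/*
 * A minimal C++ sketch (not part of this IDL) of the output-shape formulas above;
 * the helper name and the use of std::array are illustrative assumptions.
 *
 *     #include <array>
 *     #include <cstdint>
 *
 *     // Computes (n', h', w', c') for BatchToSpaceND from an NHWC input shape.
 *     std::array<int64_t, 4> BatchToSpaceNDShape(const std::array<int64_t, 4>& in,          // (n, h, w, c)
 *                                                const std::array<int64_t, 2>& blockShape,  // [height_block, width_block]
 *                                                const int64_t crops[2][2])
 *     {
 *         return {
 *             in[0] / (blockShape[0] * blockShape[1]),            // n'
 *             in[1] * blockShape[0] - crops[0][0] - crops[0][1],  // h'
 *             in[2] * blockShape[1] - crops[1][0] - crops[1][1],  // w'
 *             in[3]                                               // c' = c
 *         };
 *     }
 */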

/**
 * @brief Offsets the data in each dimension of the input tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_BIAS_ADD</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>bias</b>, the bias tensor.
 *
 * Outputs:
 *
 * * Output tensor, which is the sum of the input tensor and the bias in each dimension.
 *
 * @since 3.2
 * @version 1.0
 */
struct BiasAdd
{
};

/**
 * @brief Converts the tensor data type.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_CAST</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>type</b>, the target type of the data.
 *
 * Outputs:
 *
 * * A tensor with the specified data type.
 *
 * @since 3.2
 * @version 1.0
 */
struct Cast
{
};

/**
 * @brief Concatenates input tensors along the given axis.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_CONCAT</b>.
 *
 * Inputs:
 *
 * * Tensors with the same dimension.
 *
 * Outputs:
 *
 * * Result of the tensors connected.
 *
 * @since 3.2
 * @version 1.0
 */
struct Concat
{
    /** Axis specified. The value must be less than the number of dimensions of the input tensor. */
    long axis;
};

/**
 * @brief Calculates the 2D convolution on a 4D tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_CONV2D_FUSION</b>.
 *
 * When padMode==PAD_MODE_PAD, the elements of <b>padList</b> must be greater than or equal to <b>0</b>. In other cases, <b>padList</b> must be <b>0</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, a 4D tensor in NHWC format.
 * * <b>weight</b>, a convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
 * The value of <b>inChannel</b> divided by <b>group</b> must be an integer.
 * * <b>bias</b>, the bias of the convolution. It is an array with length of <b>[outChannel]</b>. In the quantization scenario, <b>bias</b> does not need to be quantized.
 * If quantization is required, the input data must be of the int32 type. The actual quantization parameter is determined by <b>x</b> and <b>weight</b>.
 *
 * Outputs:
 *
 * * Convolution output.
 *
 * (A C++ sketch of the <b>group</b> semantics follows this structure.)
 *
 * @since 3.2
 * @version 1.0
 */
struct Conv2DFusion
{
    /** Size (height and width) of the convolution kernel. */
    long[] kernelSize;
    /** Movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth] with length of 2. */
    long[] stride;
    /**
     * Dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth].
     * The value must be greater than or equal to <b>1</b> and cannot exceed the height and width of <b>x</b>.
     */
    long[] dilation;
    /** Padding mode. For details, see {@link PadMode}. */
    enum PadMode padMode;
    /** Padding around <b>x</b> in the height and width directions. It is an int array [top, bottom, left, right] with length of 4. */
    long[] padList;
    /**
     * Splits <b>x</b> into groups by <b>inChannel</b>. The <b>group</b> value is of the int type.
     * If <b>group</b> is <b>1</b>, it is a conventional convolution.
     * If <b>group</b> is <b>inChannel</b>, it is depthwiseConv2d. In this case, group==in_channel==out_channel.
     * If <b>group</b> is greater than <b>1</b> and less than <b>inChannel</b>, it is group convolution. In this case, out_channel==group.
     */
    long group;
    /** Number of input channels. */
    long inChannel;
    /** Number of output channels. */
    long outChannel;
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};
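
/*
 * A minimal C++ sketch (not part of this IDL) of the group semantics described in
 * the comment above; the enum and helper names are illustrative assumptions.
 *
 *     #include <cstdint>
 *
 *     enum class ConvKind { Conventional, Depthwise, Group, Invalid };
 *
 *     // Classifies a Conv2DFusion node by the relation between group and inChannel.
 *     ConvKind ClassifyConv(int64_t group, int64_t inChannel)
 *     {
 *         if (group == 1) {
 *             return ConvKind::Conventional;  // conventional convolution
 *         }
 *         if (group == inChannel) {
 *             return ConvKind::Depthwise;     // depthwiseConv2d: group == in_channel == out_channel
 *         }
 *         if (group > 1 && group < inChannel) {
 *             return ConvKind::Group;         // group convolution; inChannel/group must be an integer
 *         }
 *         return ConvKind::Invalid;
 *     }
 */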

/**
 * @brief Calculates a 2D transposed convolution on a 4D tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_CONV2D_TRANSPOSE_FUSION</b>.
 *
 * When padMode==PAD_MODE_PAD, the elements of <b>padList</b> must be greater than or equal to <b>0</b>. In other cases, <b>padList</b> must be <b>0</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, a 4D tensor in NHWC format.
 * * <b>weight</b>, a convolution weight in [outChannel, kernelHeight, kernelWidth, inChannel/group] format.
 * The value of <b>inChannel</b> divided by <b>group</b> must be an integer.
 * * <b>bias</b>, the bias of the convolution. It is an array with length of <b>[outChannel]</b>. In the quantization scenario, <b>bias</b> does not need to be quantized.
 * If quantization is required, the input data must be of the int32 type. The actual quantization parameter is determined by <b>x</b> and <b>weight</b>.
 *
 * Outputs:
 *
 * * An n-dimensional tensor.
 *
 * @since 3.2
 * @version 1.0
 */
struct Conv2dTransposeFusion
{
    /** Size (height and width) of the convolution kernel. */
    long[] kernelSize;
    /** Movement stride of the convolution kernel in height and width. It is an int array [strideHeight, strideWidth] with length of 2. */
    long[] stride;
    /**
     * Dilation size of the convolution kernel in height and width. It is an int array [dilationHeight, dilationWidth] with length of 2.
     * The value must be greater than or equal to <b>1</b> and cannot exceed the height and width of <b>x</b>.
     */
    long[] dilation;
    /** Padding mode. For details, see {@link PadMode}. */
    enum PadMode padMode;
    /** Padding around the input <b>x</b> in the height and width directions. It is an int array [top, bottom, left, right] with length of 4. */
    long[] padList;
    /**
     * Splits <b>x</b> into groups by <b>inChannel</b>.
     * If <b>group</b> is <b>1</b>, it is a conventional convolution.
     * If <b>group</b> is greater than <b>1</b> and less than or equal to <b>inChannel</b>, it is a group convolution.
     */
    long group;
    /** Number of input channels. */
    long inChannel;
    /** Number of output channels. */
    long outChannel;
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
    /**
     * An integer array with two elements, specifying the paddings along the height and width of the output tensor.
     */
    long[] outputPaddings;
};

/**
 * @brief Divides the first tensor by the second tensor element-wise.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_DIV_FUSION</b>.
 *
 * Inputs:
 *
 * * <b>x1</b>, a tensor of the int or float type.
 * * <b>x2</b>, a tensor of the int or float type.
 *
 * Outputs:
 *
 * * Quotient of the two inputs.
 *
 * @since 3.2
 * @version 1.0
 */
struct DivFusion
{
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Performs an element-wise operation.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_ELTWISE</b>.
 *
 * Inputs:
 *
 * * <b>x1</b>, the first input tensor.
 * * <b>x2</b>, the second input tensor.
 *
 * Outputs:
 *
 * * A tensor with the same data type and shape as <b>x1</b>.
 *
 * @since 3.2
 * @version 1.0
 */
struct Eltwise
{
    /** Element-wise operation type. For details, see {@link EltwiseMode}. */
    enum EltwiseMode mode;
};

/**
 * @brief Adds an additional dimension to a tensor at the given axis.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_EXPAND_DIMS</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>axis</b>, index of the dimension to add. The value is of the int32_t type and must be a constant in the range [-dim-1, dim].
 *
 * Outputs:
 *
 * * Tensor with an additional dimension at the given axis.
 *
 * @since 3.2
 * @version 1.0
 */
struct ExpandDims
{
};

/**
 * @brief Creates a tensor of the specified dimensions and fills it with a scalar.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_FILL</b>.
 *
 * Inputs:
 *
 * * <b>value</b>, a scalar used to fill the tensor.
 * * <b>shape</b>, which specifies the dimensions of the tensor to create.
 *
 * Outputs:
 *
 * * Tensor filled by the scalar.
 *
 * @since 3.2
 * @version 1.0
 */
struct Fill
{
};

/**
 * @brief Applies a full connection to the input data.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_FULL_CONNECTION</b>.
 *
 * When <b>useAxis</b> is <b>true</b>, <b>axis</b> must be set. When <b>useAxis</b> is <b>false</b>, <b>axis</b> is <b>0</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>weight</b>, a weight tensor for a full connection.
 * * <b>bias</b>, a full-connection bias. In quantization scenarios, a quantized parameter is not required. If quantization is required, the data must be of the int32 type. The actual quantization parameter is determined by <b>x</b> and <b>weight</b>.
 *
 * Outputs:
 *
 * * Output tensor of the full connection.
 *
 * (A C++ sketch of the axis flattening rule follows this structure.)
 *
 * @since 3.2
 * @version 1.0
 */
struct FullConnection
{
    /** Whether to use the bias. */
    boolean hasBias;
    /** Whether to use the axis. */
    boolean useAxis;
    /** Axis specified for the full connection. The specified axis and its following axes are converted into a 1D tensor and then the full connection is applied. */
    long axis;
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};
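
/*
 * A minimal C++ sketch (not part of this IDL) of the axis rule above: the specified
 * axis and all axes after it are flattened into one dimension before the full
 * connection is applied. The helper name is an illustrative assumption.
 *
 *     #include <cstdint>
 *     #include <vector>
 *
 *     // [d0, ..., d(axis-1), d(axis), ..., d(n-1)] -> [d0, ..., d(axis-1), d(axis)*...*d(n-1)]
 *     std::vector<int64_t> FlattenFromAxis(const std::vector<int64_t>& shape, int64_t axis)
 *     {
 *         std::vector<int64_t> out(shape.begin(), shape.begin() + axis);
 *         int64_t flat = 1;
 *         for (size_t i = static_cast<size_t>(axis); i < shape.size(); ++i) {
 *             flat *= shape[i];
 *         }
 *         out.push_back(flat);
 *         return out;
 *     }
 */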

/**
 * @brief Performs batch normalization for a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_FUSED_BATCH_NORM</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, a tensor of shape [N, ..., C], that is, the nth dimension is the number of channels.
 * * <b>scale</b>, a 1D tensor of the scaling factor used to scale the first normalized tensor.
 * * <b>offset</b>, a 1D tensor used to offset the first normalized tensor.
 * * <b>mean</b>, a 1D tensor of the overall mean value. It is used only for inference. In case of training, this parameter must be left empty.
 * * <b>variance</b>, a 1D tensor used for the overall variance. It is used only for inference. In case of training, this parameter must be left empty.
 *
 * Outputs:
 *
 * * Normalized tensor.
 *
 * @since 3.2
 * @version 1.0
 */
struct FusedBatchNorm
{
    /** A small value close to zero. It is used to ensure that the divisor is not 0. */
    float epsilon;
};

/**
 * @brief Returns the slice of the input tensor based on the specified index and axis.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_GATHER</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>inputIndices</b>, indices of elements of the original tensor. The value is an array of the int type and must be in the range [0, x.shape[axis]).
 * * <b>axis</b>, the axis on which <b>x</b> is sliced. The value is an array with one element of the int32_t type.
 *
 * Outputs:
 *
 * * Sliced tensor.
 *
 * @since 3.2
 * @version 1.0
 */
struct Gather
{
};

/**
 * @brief Applies layer normalization for a tensor from the specified axis.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_LAYER_NORM_FUSION</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>gamma</b>, an m-dimensional tensor. The dimensions of <b>gamma</b> must be the same as the shape of the part of the input tensor to normalize.
 * * <b>beta</b>, an m-dimensional tensor with the same shape as <b>gamma</b>.
 *
 * Outputs:
 *
 * * An n-dimensional tensor, with the same data type and shape as the input tensor.
 *
 * @since 3.2
 * @version 1.0
 */
struct LayerNormFusion
{
    /** Start axis of <b>x</b> to apply layer normalization. */
    long beginNormAxis;
    /** A value added to the denominator for numerical stability. */
    float epsilon;
    /** Whether to perform an element-wise operation. */
    boolean elementwiseAffine;
    /** Start axis of the parameter input (gamma, beta) to apply layer normalization. The value must be in the range [-n, n). */
    long beginParamsAxis;
};

/**
 * @brief Calculates the result of <b>x1</b> <= <b>x2</b> element-wise.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_LESS_EQUAL</b>.
 *
 * Inputs:
 *
 * * <b>x1</b>, which can be a number, a Boolean value, or a tensor whose data type is number or Boolean.
 * * <b>x2</b>, which can be a number or a Boolean value if <b>x1</b> is a tensor; or a tensor with the data type of number or Boolean if <b>x1</b> is not a tensor.
 *
 * Outputs:
 *
 * * A tensor with the data type of Boolean. When a quantization model is used, the quantization parameters of the output cannot be omitted. However, values of the quantization parameters do not affect the result.
 *
 * @since 3.2
 * @version 1.0
 */
struct LessEqual
{
};

/**
 * @brief Calculates the matrix product of <b>x1</b> and <b>x2</b>.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_MATMUL_FUSION</b>.
 *
 * Inputs:
 *
 * * <b>x1</b>, an n-dimensional tensor, whose data type can be number or Boolean.
 * * <b>x2</b>, an n-dimensional tensor, whose data type can be number or Boolean.
 *
 * Outputs:
 *
 * * Matrix product of the inputs. When type!=DATA_TYPE_UNKNOWN, the data type of the output is determined by <b>type</b>. When type==DATA_TYPE_UNKNOWN,
 * the data type of the output depends on the data type converted during the calculation of <b>x1</b> and <b>x2</b>.
 *
 * @since 3.2
 * @version 1.0
 */
struct MatMulFusion
{
    /** Whether to transpose the <b>x1</b> matrix. */
    boolean transposeA;
    /** Whether to transpose the <b>x2</b> matrix. */
    boolean transposeB;
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Calculates the maximum of <b>x1</b> and <b>x2</b> element-wise. The inputs of <b>x1</b> and <b>x2</b> comply with the implicit type conversion rules to make the data types consistent.
 *
 * The inputs must be two tensors or one tensor and one scalar. When the inputs are two tensors, the data types cannot be Boolean at the same time, and their shapes
 * can be broadcast to the same. When the inputs are one tensor and one scalar, the scalar must be a constant.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_MAXIMUM</b>.
 *
 * Inputs:
 *
 * * <b>x1</b>, an n-dimensional tensor, whose data type can be number or Boolean.
 * * <b>x2</b>, an n-dimensional tensor, whose data type can be number or Boolean.
 *
 * Outputs:
 *
 * * Maximum value of the elements of the two tensors.
 *
 * @since 3.2
 * @version 1.0
 */
struct Maximum
{
};

/**
 * @brief Applies a 2D maximum pooling over an input tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_MAX_POOL_FUSION</b>.
 *
 * When padMode==PAD_MODE_PAD, the elements of <b>pad</b> must be greater than or equal to <b>0</b>. In other cases, <b>pad</b> must be <b>0</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Outputs:
 *
 * * Tensor after maximum pooling.
 *
 * @since 3.2
 * @version 1.0
 */
struct MaxPoolFusion
{
    /** Size of the kernel used to take the maximum value. It is an int array [kernel_height, kernel_width] with length of 2. */
    long[] kernelSize;
    /** Distance of kernel moving. It is an int array with two elements. */
    long[] strides;
    /** Array to pad. */
    long[] pad;
    /** Padding mode. For details, see {@link PadMode}. */
    enum PadMode padMode;
    /** Format of the tensor data. For details, see {@link Format}. */
    enum Format format;
    /** Whether to do global pooling. */
    boolean global;
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Multiplies the elements in the same position of <b>x1</b> and <b>x2</b> to obtain the output.
 *
 * If the shapes of <b>x1</b> and <b>x2</b> are different, expand <b>x1</b> and <b>x2</b> to the same shape through broadcast and then perform the multiplication.
 * The {@link NodeType} of this operator is <b>NODE_TYPE_MUL_FUSION</b>.
 *
 * Inputs:
 *
 * * <b>x1</b>, a tensor of the int or float type.
 * * <b>x2</b>, a tensor of the int or float type.
 *
 * Outputs:
 *
 * * Product of each element of <b>x1</b> and <b>x2</b>.
 *
 * @since 3.2
 * @version 1.0
 */
struct MulFusion
{
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Generates a one-hot tensor based on the specified locations.
 *
 * The locations specified by <b>indices</b> are set to <b>on_value</b>, and other locations are set to <b>off_value</b>.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_ONE_HOT</b>.
 *
 * Inputs:
 *
 * * <b>indices</b>, an n-dimensional tensor. Each element in <b>indices</b> determines the location of <b>on_value</b> in each one-hot vector.
 * * <b>depth</b>, an integer scalar that determines the depth of the one-hot vector. The value of <b>depth</b> must be greater than <b>0</b>.
 * * <b>on_value</b>, a scalar that specifies a valid value in the one-hot vector.
 * * <b>off_value</b>, a scalar that specifies the values of other locations in the one-hot vector except the valid value.
 *
 * Outputs:
 *
 * * An (n+1)-dimensional tensor if <b>indices</b> is an n-dimensional tensor. The output shape is determined by <b>indices</b> and <b>axis</b>.
 *
 * (A C++ sketch of the <b>axis</b> rule follows this structure.)
 *
 * @since 3.2
 * @version 1.0
 */
struct OneHot
{
    /**
     * An integer scalar that specifies the dimension for inserting the one-hot vector. Assume that the shape of <b>indices</b> is [N, C], and the value of <b>depth</b> is D.
     * When <b>axis</b> is <b>0</b>, the shape of the output is [D, N, C].
     * When <b>axis</b> is <b>-1</b>, the shape of the output is [N, C, D].
     * When <b>axis</b> is <b>1</b>, the shape of the output is [N, D, C].
     */
    long axis;
};
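
/*
 * A minimal C++ sketch (not part of this IDL) of how axis places the depth
 * dimension D into the output shape; the helper name is an illustrative assumption.
 *
 *     #include <cstdint>
 *     #include <vector>
 *
 *     // indicesShape [N, C]: axis 0 -> [D, N, C]; axis 1 -> [N, D, C]; axis -1 -> [N, C, D].
 *     std::vector<int64_t> OneHotShape(std::vector<int64_t> indicesShape, int64_t depth, int64_t axis)
 *     {
 *         if (axis < 0) {
 *             axis += static_cast<int64_t>(indicesShape.size()) + 1;  // -1 means "after the last dimension"
 *         }
 *         indicesShape.insert(indicesShape.begin() + axis, depth);
 *         return indicesShape;
 *     }
 */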

/**
 * @brief Pads the input tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_PAD_FUSION</b>.
 *
 * When paddingMode==PADDING_MODE_CONSTANT, <b>constantValue</b> must be set. The default value of <b>constantValue</b> is <b>0</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>paddings</b>, a 2D tensor that specifies the length to add in each dimension. The shape is [n, 2]. <b>paddings[i][0]</b> indicates the number of paddings to add before the input tensor in the ith dimension.
 * <b>paddings[i][1]</b> indicates the number of paddings to add after the input tensor in the ith dimension.
 *
 * Outputs:
 *
 * * An n-dimensional tensor after padding, with the same dimensions and data type as <b>x</b>. The shape is determined by <b>x</b> and <b>paddings</b>:
 * output.shape[i] = input.shape[i] + paddings[i][0] + paddings[i][1].
 * (A C++ sketch of this shape rule follows this structure.)
 *
 * @since 3.2
 * @version 1.0
 */
struct PadFusion
{
    /**
     * A 2D tensor, specifying the length to add in each dimension. The shape is [n, 2]. <b>paddings[i][0]</b> indicates the number of paddings to add before the input <b>x</b> in the ith dimension.
     * <b>paddings[i][1]</b> indicates the number of paddings to add after the input <b>x</b> in the ith dimension.
     * The meaning of this parameter is the same as that of the <b>paddings</b> input.
     */
    long[][] paddings;
    /**
     * Padding mode.
     * For details, see {@link PaddingMode}.
     */
    enum PaddingMode paddingMode;
    /**
     * A constant with the same data type as <b>x</b>. It specifies the value to add in the pad operation.
     * This parameter is valid only when paddingMode==PADDING_MODE_CONSTANT. The default value is <b>0</b>.
     */
    float constantValue;
};
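
/*
 * A minimal C++ sketch (not part of this IDL) of the output-shape rule above,
 * output.shape[i] = input.shape[i] + paddings[i][0] + paddings[i][1]; the helper
 * name is an illustrative assumption.
 *
 *     #include <array>
 *     #include <cstdint>
 *     #include <vector>
 *
 *     std::vector<int64_t> PadShape(const std::vector<int64_t>& in,
 *                                   const std::vector<std::array<int64_t, 2>>& paddings)  // shape [n, 2]
 *     {
 *         std::vector<int64_t> out(in.size());
 *         for (size_t i = 0; i < in.size(); ++i) {
 *             out[i] = in[i] + paddings[i][0] + paddings[i][1];
 *         }
 *         return out;
 *     }
 */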

/**
 * @brief Calculates the <b>y</b> power of each element in <b>x</b>. The inputs must be two tensors or one tensor and one scalar.
 *
 * When the inputs are two tensors, their data types cannot be Boolean at the same time, and their shapes must be the same. When the inputs are one tensor and one scalar, the scalar must be a constant.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_POW_FUSION</b>.
 *
 * The <b>x' = scale*x+shift</b> operation is performed for each element of <b>x</b>, and then the <b>y</b> power of <b>x'</b> is calculated.
 * (A C++ sketch of this rule follows this structure.)
 *
 * Inputs:
 *
 * * <b>x</b>, a number, a Boolean value, or a tensor whose data type is number or Boolean.
 * * <b>y</b>, a number, a Boolean value, or a tensor whose data type is number or Boolean.
 *
 * Outputs:
 *
 * * A tensor, whose shape is determined by the shape of <b>x</b> and <b>y</b> after broadcasting.
 *
 * @since 3.2
 * @version 1.0
 */
struct PowFusion
{
    /** Scale factor, applied to each element of <b>x</b>. */
    float scale;
    /** Offset, added to each element of <b>x</b> after scaling. */
    float shift;
};
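
/*
 * A minimal C++ sketch (not part of this IDL) of the element-wise rule above:
 * x' = scale * x + shift is computed first, and then x' is raised to the power y.
 *
 *     #include <cmath>
 *
 *     float PowFusionElement(float x, float y, float scale, float shift)
 *     {
 *         return std::pow(scale * x + shift, y);
 *     }
 */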

/**
 * @brief Applies the PReLU activation of <b>x</b> and <b>weight</b>.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_PRELU_FUSION</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor. If <b>n</b> is greater than or equal to 2, <b>x</b> must be [BatchSize, ..., Channels], where the last dimension is the number of channels.
 * * <b>weight</b>, a 1D tensor. The length of <b>weight</b> must be 1 or equal to the number of channels. If the length of <b>weight</b> is 1, all channels share the same weight.
 * If the length of <b>weight</b> is equal to the number of channels, each channel exclusively has a weight. If <b>n</b> of <b>x</b> is less than 2, the <b>weight</b> length must be 1.
 *
 * Outputs:
 *
 * * PReLU activation value of <b>x</b>, with the same shape and data type as <b>x</b>.
 *
 * @since 3.2
 * @version 1.0
 */
struct PReLUFusion
{
    /**
     * Whether to enable weight sharing for the parameter validity check.
     * If the length of <b>weight</b> is <b>1</b>, <b>channelShared</b> must be <b>true</b>. Otherwise, <b>channelShared</b> is <b>false</b>.
     */
    boolean channelShared;
};

/**
 * @brief Converts the data type.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_QUANT_DTYPE_CAST</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Outputs:
 *
 * * Tensor after the data type conversion.
 *
 * @since 3.2
 * @version 1.0
 */
struct QuantDTypeCast
{
    /** Data type of the input tensor. */
    long srcT;
    /** Data type of the output tensor. */
    long dstT;
};

/**
 * @brief Reduces the dimensions of a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_REDUCE_FUSION</b>.
 *
 * If <b>mode</b> is <b>REDUCE_ALL</b>, <b>REDUCE_PROD</b>, or <b>REDUCE_MEAN</b> and <b>reduceToEnd</b> is <b>true</b>, the output is the reduced value multiplied by <b>coeff</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor, where <b>n</b> is less than 8.
 * * <b>axis</b>, a 1D tensor that specifies the dimension to reduce. The value range of each element in axis is [-n, n).
 *
 * Outputs:
 *
 * * An m-dimensional tensor, with the same data type as <b>x</b>. If <b>keepDims</b> is <b>false</b>, m < n. If <b>keepDims</b> is <b>true</b>, m==n.
 *
 * (A C++ sketch of the <b>reduceToEnd</b> axis expansion follows this structure.)
 *
 * @since 3.2
 * @version 1.0
 */
struct ReduceFusion
{
    /** Whether to keep the dimensions unchanged. */
    boolean keepDims;
    /** Algorithm used to reduce the tensor dimensions. For details, see {@link ReduceMode}. */
    enum ReduceMode mode;
    /**
     * If this parameter is set to <b>true</b>, the first element is obtained from axis and set to <b>i</b>,
     * and then <b>axis</b> will be changed to [i, i+1, ..., n-1].
     * For example, if <b>reduceToEnd</b> is <b>true</b>, <b>axis</b> is [2,4], and the number of dimensions of <b>x</b> is 7, then <b>axis</b> will be [2,3,4,5,6].
     */
    boolean reduceToEnd;
    /** Coefficient. */
    float coeff;
};
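
/*
 * A minimal C++ sketch (not part of this IDL) of the reduceToEnd rule above: the
 * first element i of axis is kept and axis is expanded to [i, i+1, ..., n-1].
 * The helper name is an illustrative assumption.
 *
 *     #include <cstdint>
 *     #include <vector>
 *
 *     // E.g. axis = [2, 4] with a 7-dimensional x becomes [2, 3, 4, 5, 6]; axis must be non-empty.
 *     std::vector<int64_t> ExpandAxisToEnd(const std::vector<int64_t>& axis, int64_t n)
 *     {
 *         std::vector<int64_t> out;
 *         for (int64_t i = axis.front(); i < n; ++i) {
 *             out.push_back(i);
 *         }
 *         return out;
 *     }
 */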

/**
 * @brief Reshapes a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_RESHAPE</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>InputShape</b>, a 1D tensor that specifies the shape of the output tensor. It must be a constant.
 *
 * Outputs:
 *
 * * A tensor of the specified shape. The data type is the same as that of <b>x</b>.
 *
 * @since 3.2
 * @version 1.0
 */
struct Reshape
{
};

/**
 * @brief Resizes a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_RESIZE</b>.
 *
 * The parameter combination of this operator can implement the <b>Resize</b> function.
 * For example, to implement bilinear interpolation on the four corners of an image that is precisely aligned, set:
 * method = RESIZE_METHOD_LINEAR
 * coordinateTransformMode = COORDINATE_TRANSFORM_MODE_ALIGN_CORNERS
 *
 * Inputs:
 *
 * * <b>x</b>, a 4D tensor in the [batchSize, height, width, channels] (NHWC) format.
 *
 * Outputs:
 *
 * * A 4D tensor after resizing, with the same data type as <b>x</b>. The shape is determined by the resizing parameters.
 *
 * @since 3.2
 * @version 1.0
 */
struct Resize
{
    /** Method used for resizing. For details, see {@link ResizeMethod}. */
    enum ResizeMethod method;
    /** Height of the 4D tensor after resizing. */
    long newHeight;
    /** Width of the 4D tensor after resizing. */
    long newWidth;
    /** Whether to maintain the height/width ratio of <b>x</b> after resizing. */
    boolean preserveAspectRatio;
    /** Coordinate transformation method. For details, see {@link CoordinateTransformMode}. */
    enum CoordinateTransformMode coordinateTransformMode;
    /** Cubic coefficient, used when <b>method</b> is <b>RESIZE_METHOD_CUBIC</b>. */
    float cubicCoeff;
    /** When excludeOutside==1, the sampling weight that exceeds the boundary of <b>x</b> is set to <b>0</b>, and other weights are normalized. */
    long excludeOutside;
    /** Value to interpolate, which is used only when <b>x</b> is cropped. The sampling weight that exceeds the boundary is set to <b>extrapolationValue</b>. */
    float extrapolationValue;
    /** Nearest neighbor interpolation algorithm, used when <b>method</b> is <b>RESIZE_METHOD_NEAREST</b>. For details, see {@link NearestMode}. */
    enum NearestMode nearestMode;
};

/**
 * @brief Calculates the reciprocal of the square root of a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_RSQRT</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor, where <b>n</b> is less than 8. Each element of the tensor cannot be less than 0.
 *
 * Outputs:
 *
 * * An n-dimensional tensor, with the same shape and data type as <b>x</b>.
 *
 * @since 3.2
 * @version 1.0
 */
struct Rsqrt
{
};

/**
 * @brief Scales a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SCALE_FUSION</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>scale</b>, the scaling tensor.
 * * <b>bias</b>, the bias tensor.
 *
 * Outputs:
 *
 * * A scaled n-dimensional tensor, whose data type is the same as that of <b>x</b> and whose shape is determined by <b>axis</b>.
 *
 * @since 3.2
 * @version 1.0
 */
struct ScaleFusion
{
    /** Dimensions to scale. */
    long axis;
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Returns the shape of the input tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SHAPE</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Outputs:
 *
 * * An integer array representing the dimensions of <b>x</b>.
 *
 * @since 3.2
 * @version 1.0
 */
struct Shape
{
};

/**
 * @brief Slices a tensor of the specified size.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SLICE_FUSION</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>begin</b>, an array of integers greater than or equal to 0, specifying the start of the slice.
 * * <b>size</b>, an array of integers greater than or equal to 0, specifying the length of the slice. For each dimension <b>i</b>, 1<=size[i]<=input.shape[i]-begin[i].
 *
 * Outputs:
 *
 * * The sliced n-dimensional tensor.
 *
 * @since 3.2
 * @version 1.0
 */
struct SliceFusion
{
    /** Dimensions on which the tensor is sliced. */
    long[] axes;
};

/**
 * @brief Applies the <b>softmax</b> operation on a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SOFTMAX</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Outputs:
 *
 * * Result of the <b>softmax</b> operation. It is an n-dimensional tensor with the same data type and shape as <b>x</b>.
 *
 * @since 3.2
 * @version 1.0
 */
struct Softmax
{
    /** Dimensions on which the <b>softmax</b> operation is performed. Each value is an integer in the range [-n, n). */
    long[] axis;
};

/**
 * @brief Splits a 4D tensor into multiple blocks in the spatial dimension and then concatenates these blocks in the batch dimension.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SPACE_TO_BATCH_ND</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Outputs:
 *
 * * A 4D tensor with the same data type as <b>x</b>. The shape is determined by <b>input</b>, <b>blockShape</b>, and <b>paddings</b>. Assume that the input shape is [n,c,h,w], then:
 * \f$ output.shape[0] = n * blockShape[0] * blockShape[1] \f$<br>
 * \f$ output.shape[1] = c \f$<br>
 * \f$ output.shape[2] = (h + paddings[0][0] + paddings[0][1]) / blockShape[0] \f$<br>
 * \f$ output.shape[3] = (w + paddings[1][0] + paddings[1][1]) / blockShape[1] \f$<br>
 * \f$ (h + paddings[0][0] + paddings[0][1]) \f$ must be an integer multiple of \f$ blockShape[0] \f$, and \f$ (w + paddings[1][0] + paddings[1][1]) \f$ must be an integer multiple of \f$ blockShape[1] \f$.
 * (A C++ sketch of these formulas follows this structure.)
 *
 * @since 3.2
 * @version 1.0
 */
struct SpaceToBatchND
{
    /** Number of blocks. The value must be greater than 1. */
    long[] blockShape;
    /** Padding size for spatial dimensions. */
    long[][] paddings;
};
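
/*
 * A minimal C++ sketch (not part of this IDL) of the output-shape formulas above,
 * including the divisibility requirement; the helper name is an illustrative assumption.
 *
 *     #include <array>
 *     #include <cassert>
 *     #include <cstdint>
 *
 *     std::array<int64_t, 4> SpaceToBatchNDShape(const std::array<int64_t, 4>& in,  // (n, c, h, w)
 *                                                const std::array<int64_t, 2>& blockShape,
 *                                                const int64_t paddings[2][2])
 *     {
 *         const int64_t h = in[2] + paddings[0][0] + paddings[0][1];
 *         const int64_t w = in[3] + paddings[1][0] + paddings[1][1];
 *         assert(h % blockShape[0] == 0 && w % blockShape[1] == 0);  // padded sizes must be multiples
 *         return { in[0] * blockShape[0] * blockShape[1], in[1], h / blockShape[0], w / blockShape[1] };
 *     }
 */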

/**
 * @brief Splits a tensor into multiple tensors along the axis dimension. The number of tensors is specified by <b>outputNum</b>.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SPLIT</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Outputs:
 *
 * * An array of n-dimensional tensors, with the same data type and dimensions. The data type of each tensor is the same as that of <b>x</b>.
 *
 * (A C++ sketch of the <b>sizeSplits</b> rule follows this structure.)
 *
 * @since 3.2
 * @version 1.0
 */
struct Split
{
    /** Number of output tensors. */
    long outputNum;
    /**
     * Size of each tensor to output.
     * If <b>sizeSplits</b> is empty, <b>x</b> will be evenly split into tensors of the same size. In this case, x.shape[axis] must be an integer multiple of <b>outputNum</b>.
     * If <b>sizeSplits</b> is not empty, the sum of all elements must be equal to x.shape[axis].
     */
    long[] sizeSplits;
    /** Target axis on which <b>x</b> is split. The data type is int. */
    long axis;
};
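
/*
 * A minimal C++ sketch (not part of this IDL) of the sizeSplits rule above; the
 * helper name is an illustrative assumption.
 *
 *     #include <cstdint>
 *     #include <numeric>
 *     #include <vector>
 *
 *     // Returns true if the split configuration is consistent with x.shape[axis].
 *     bool ValidateSplit(int64_t dimSize, int64_t outputNum, const std::vector<int64_t>& sizeSplits)
 *     {
 *         if (sizeSplits.empty()) {
 *             return dimSize % outputNum == 0;  // even split into outputNum tensors
 *         }
 *         return std::accumulate(sizeSplits.begin(), sizeSplits.end(), int64_t{0}) == dimSize;
 *     }
 */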

/**
 * @brief Calculates the square root of a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SQRT</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Outputs:
 *
 * * An n-dimensional tensor, with the same data type and shape as <b>x</b>.
 *
 * @since 3.2
 * @version 1.0
 */
struct Sqrt
{
};

/**
 * @brief Calculates the square of the difference between two tensors. The <b>SquaredDifference</b> operator supports subtraction between tensors.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SQUARED_DIFFERENCE</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, a tensor representing the minuend, which can be a number or a Boolean value.
 * * <b>y</b>, a tensor representing the subtrahend, which can be a number or a Boolean value.
 *
 * Outputs:
 *
 * * A tensor obtained. The shape of the output is determined by <b>x</b> and <b>y</b>. If <b>x</b> and <b>y</b> are of the same shape,
 * the shape of the output is the same as that of <b>x</b> and <b>y</b>. If <b>x</b> and <b>y</b> are of different shapes, you need to perform the broadcast operation on <b>x</b> and <b>y</b> first.
 * The precision of the output is the same as the input tensor with higher precision.
 *
 * @since 3.2
 * @version 1.0
 */
struct SquaredDifference
{
};

/**
 * @brief Removes the dimension with length of 1 from the specified axis. The int8 quantization input is supported.
 *
 * Assume that the shape of <b>x</b> is [2, 1, 1, 2, 2] and <b>axis</b> is [0,1], the shape of the output tensor must be [2, 1, 2, 2]. That is, the dimension with one element between the 0th and 1st dimensions is removed.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SQUEEZE</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Outputs:
 *
 * * Tensor obtained.
 *
 * @since 3.2
 * @version 1.0
 */
struct Squeeze
{
    /** Axis on which the dimension of length 1 is to be removed. The value can be an integer or an array. The value range of the integer is [-n, n). */
    long[] axis;
};

/**
 * @brief Stacks multiple tensors along the specified axis. If the number of dimensions of each tensor is <b>n</b> before stacking, the number of dimensions of the output tensor is <b>n</b>+1.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_STACK</b>.
 *
 * Inputs:
 *
 * * Multiple n-dimensional tensors, which are of the same data type and shape.
 *
 * Outputs:
 *
 * * An (n+1)-dimensional tensor stacked along the specified axis, with the same data type and precision as the input tensors.
 *
 * @since 3.2
 * @version 1.0
 */
struct Stack
{
    /** An integer that specifies the dimension for tensor stacking. The value range is [-(n+1),(n+1)), which means a negative number is allowed. */
    long axis;
};

/**
 * @brief Slices a tensor at the specified intervals.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_STRIDED_SLICE</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>begin</b>, a 1D tensor that specifies the position from which the tensor is sliced. The length of <b>begin</b> is <b>n</b>. begin[i] specifies the start point to slice in the ith dimension.
 * * <b>end</b>, a 1D tensor that specifies the end to which the tensor is sliced. The length of <b>end</b> is <b>n</b>. end[i] specifies the end point to slice in the ith dimension.
 * * <b>strides</b>, a 1D tensor that specifies the intervals for slicing. The length of <b>strides</b> is <b>n</b>. strides[i] specifies the intervals at which the tensor is sliced in the ith dimension. Negative values are allowed.
 *
 * For the input tensor, the shapes of <b>begin</b>, <b>end</b>, and <b>strides</b> must be the same. The indices of <b>begin</b> and <b>end</b> start from <b>0</b>. The elements of <b>strides</b> cannot be <b>0</b>.
 *
 * Outputs:
 *
 * * A tensor, with the same data type as <b>x</b>. The number of dimensions of the output tensor is rank(x[0])+1.
 *
 * (A C++ sketch of how the bit masks are read follows this structure.)
 *
 * @since 3.2
 * @version 1.0
 */
struct StridedSlice
{
    /**
     * Mask of <b>begin</b>.
     * <b>beginMask</b> identifies different dimensions of <b>x</b> in binary code. For example, if bit i of <b>beginMask</b> is set to <b>1</b>, the setting of <b>begin</b> in the ith dimension will be invalid, which means the start index of that dimension is <b>0</b>. The default value is <b>0</b>.
     */
    long beginMask;
    /**
     * Mask of <b>end</b>. The parameter is similar to <b>beginMask</b>.
     * <b>endMask</b> identifies different dimensions of <b>x</b> in binary code. For example, if bit i of <b>endMask</b> is set to <b>1</b>, the setting of <b>end</b> in the ith dimension will be invalid, which means the tensor will be sliced till the end in that dimension. The default value is <b>0</b>.
     */
    long endMask;
    /**
     * An integer used to mask <b>begin</b> and <b>end</b>.
     * The tensor does not need to be sliced in the non-zero dimensions.
     * <b>ellipsisMask</b> is represented in binary code. If bit i of <b>ellipsisMask</b> is <b>1</b>, elements are sliced from the first element at strides[i] in the ith dimension until the tensor boundary.
     */
    long ellipsisMask;
    /**
     * Used to add a dimension.
     * <b>newAxisMask</b> identifies different dimensions of <b>x</b> in binary code. If bit i is <b>1</b>, the settings of <b>begin</b>, <b>end</b>, and <b>strides</b> are invalid for all dimensions, and a dimension with size of 1 is added to bit i.
     */
    long newAxisMask;
    /**
     * Used to shrink the specified dimension.
     * <b>shrinkAxisMask</b> is represented in binary code. If the ith bit of <b>shrinkAxisMask</b> is <b>1</b>, all elements in the ith dimension will be discarded, and the length of the ith dimension is shrunk to <b>1</b>.
     */
    long shrinkAxisMask;
};
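
/*
 * A minimal C++ sketch (not part of this IDL) of how the bit masks above are read:
 * bit i of beginMask (or endMask) tells whether begin[i] (or end[i]) is ignored
 * for the ith dimension. The helper name is an illustrative assumption.
 *
 *     #include <cstdint>
 *
 *     bool BeginIgnored(int64_t beginMask, int dim)
 *     {
 *         return ((beginMask >> dim) & 1) != 0;  // bit set -> slicing starts at index 0 in that dimension
 *     }
 */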

/**
 * @brief Calculates the difference between two tensors.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_SUB_FUSION</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, the minuend, which is a tensor of the int or float type.
 * * <b>y</b>, the subtrahend, which is a tensor of the int or float type.
 *
 * Outputs:
 *
 * * Difference between the two tensors. The output shape is determined by <b>x</b> and <b>y</b>. If <b>x</b> and <b>y</b> are of the same shape, the output tensor has the same shape as <b>x</b> and <b>y</b>.
 * If <b>x</b> and <b>y</b> are of different shapes, perform the broadcast operation on <b>x</b> or <b>y</b> first. The precision of the output is the same as the input tensor with higher precision.
 *
 * @since 3.2
 * @version 1.0
 */
struct SubFusion
{
    /** Activation function type. For details, see {@link ActivationType}. */
    enum ActivationType activationType;
};

/**
 * @brief Copies a tensor the specified number of times.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_TILE_FUSION</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 * * <b>multiples</b>, a 1D tensor that specifies the number of times that the input tensor is copied in each dimension. The length <b>m</b> is not less than the number of dimensions of <b>x</b>.
 *
 * Outputs:
 *
 * * An m-dimensional tensor, with the same data type as <b>x</b>. If the length of <b>x</b> is the same as that of <b>multiples</b>,
 * the number of dimensions of the output tensor is the same as that of <b>x</b>, that is, an n-dimensional tensor is output. If the length of <b>multiples</b> is greater than <b>n</b>, <b>1s</b> are used to pad the dimensions of <b>x</b>.
 * Then, <b>x</b> is copied the specified number of times in each dimension to obtain an m-dimensional tensor.
 *
 * (A C++ sketch of this shape rule follows this structure.)
 *
 * @since 3.2
 * @version 1.0
 */
struct TileFusion
{
    /** A 1D tensor that specifies the number of times that data is copied in each dimension. The length <b>m</b> is not less than the number of dimensions of <b>x</b>. */
    long[] dims;
};
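
/*
 * A minimal C++ sketch (not part of this IDL) of the shape rule above: the input
 * shape is left-padded with 1s to the length of multiples, then multiplied
 * element-wise. The helper name is an illustrative assumption.
 *
 *     #include <algorithm>
 *     #include <cstdint>
 *     #include <vector>
 *
 *     // Requires multiples.size() >= in.size(), as stated in the comment above.
 *     std::vector<int64_t> TileShape(const std::vector<int64_t>& in, const std::vector<int64_t>& multiples)
 *     {
 *         std::vector<int64_t> out(multiples.size(), 1);
 *         std::copy(in.begin(), in.end(), out.end() - in.size());  // pad leading dimensions with 1s
 *         for (size_t i = 0; i < out.size(); ++i) {
 *             out[i] *= multiples[i];
 *         }
 *         return out;
 *     }
 */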

/**
 * @brief Obtains the first K maximum values along the axis and their indices.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_TOPK_FUSION</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Outputs:
 *
 * * <b>output0</b>, the first K maximum values in the axis dimension.
 * * <b>output1</b>, indices of the first K maximum values.
 *
 * @since 3.2
 * @version 1.0
 */
struct TopKFusion
{
    /** The value <b>true</b> means to sort the elements in descending order; the value <b>false</b> means the opposite. */
    boolean sorted;
    /** Specified axis. */
    long axis;
};

/**
 * @brief Transposes a tensor.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_TRANSPOSE</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor to transpose.
 * * <b>perm</b>, a 1D tensor that specifies the permutation. Its length is the same as the number of dimensions of <b>x</b>.
 *
 * Outputs:
 *
 * * An n-dimensional tensor, which has the same data type and quantization parameters as <b>x</b>. The shape is determined by the shape of <b>x</b> and <b>perm</b>.
 *
 * @since 3.2
 * @version 1.0
 */
struct Transpose
{
};

/**
 * @brief Adds a dimension based on the value of <b>axis</b>.
 *
 * The {@link NodeType} of this operator is <b>NODE_TYPE_UNSQUEEZE</b>.
 *
 * Inputs:
 *
 * * <b>x</b>, an n-dimensional tensor.
 *
 * Outputs:
 *
 * * Tensor output.
 *
 * @since 3.2
 * @version 1.0
 */
struct Unsqueeze
{
    /** Dimension to add. The value of <b>axis</b> can be an integer or an array of integers. The value range of the integer is [-n, n). */
    long[] axis;
};

/** @} */