/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/**
 * @addtogroup NNRt
 * @{
 *
 * @brief Provides a unified interface for AI chip drivers to access OpenHarmony.
 * Neural Network Runtime (NNRt) is a cross-chip inference runtime for the AI field.
 * It serves as a bridge between the upper-layer AI inference framework and the underlying acceleration chips,
 * enabling cross-chip inference of AI models.
 * @since 3.2
 * @version 1.0
 */

/**
 * @file IPreparedModel.idl
 *
 * @brief Defines the APIs for exporting AI models and performing AI model inference.
 *
 * @since 3.2
 * @version 1.0
 */

/**
 * @brief Defines the package path of the NNRt module.
 *
 * @since 3.2
 * @version 1.0
 */
package ohos.hdi.nnrt.v1_0;

import ohos.hdi.nnrt.v1_0.NnrtTypes;

/**
 * @brief Provides the APIs for exporting AI models and performing AI model inference.
 *
 * @since 3.2
 * @version 1.0
 */
interface IPreparedModel {
    /**
     * @brief Exports an AI model from the cache.
     *
     * @param modelCache Indicates an array of model files to export. For details, see {@link SharedBuffer}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    ExportModelCache([out] struct SharedBuffer[] modelCache);
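
    /*
     * Illustrative sketch (not part of the interface): how a caller might invoke
     * ExportModelCache through the C++ proxy generated from this file. The variable
     * preparedModel is an assumption (a prepared-model handle obtained elsewhere),
     * and the [out] parameter is assumed to map to a mutable reference argument.
     *
     *     std::vector<SharedBuffer> modelCache;                       // filled by the driver
     *     int32_t ret = preparedModel->ExportModelCache(modelCache);  // 0 on success
     *     if (ret < 0) {
     *         // A negative value means the export failed; modelCache should not be used.
     *     }
     */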

    /**
     * @brief Performs AI model inference.
     *
     * @param inputs Indicates the input data for AI model inference. The data is input in the sequence defined by the model. For details about the input data type, see {@link IOTensor}.
     * @param outputs Indicates the output data of AI model inference. After inference, the output data is written to the shared buffer. For details about the output data type, see {@link IOTensor}.
     * @param outputsDims Indicates the dimensions of each output. The sequence is the same as that of <b>outputs</b>.
     * @param isOutputBufferEnough Indicates whether the shared buffer of each output is large enough to hold the output data. The value <b>true</b> means the buffer is large enough; the value <b>false</b> means the opposite.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    Run([in] struct IOTensor[] inputs, [in] struct IOTensor[] outputs,
        [out] int[][] outputsDims, [out] boolean[] isOutputBufferEnough);
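
    /*
     * Illustrative sketch (not part of the interface): a possible call to Run()
     * through the generated C++ proxy, assuming [in] parameters map to const
     * references and [out] parameters to mutable references. PrepareInputs() and
     * PrepareOutputs() are hypothetical helpers that build IOTensors whose data
     * is backed by shared buffers.
     *
     *     std::vector<IOTensor> inputs = PrepareInputs();    // hypothetical helper
     *     std::vector<IOTensor> outputs = PrepareOutputs();  // hypothetical helper
     *     std::vector<std::vector<int32_t>> outputsDims;     // one dimension vector per output
     *     std::vector<bool> isOutputBufferEnough;            // one flag per output
     *     int32_t ret = preparedModel->Run(inputs, outputs, outputsDims, isOutputBufferEnough);
     *     if (ret == 0) {
     *         // Results are written to the shared buffers referenced by outputs;
     *         // outputsDims reports the actual dimensions in the same order as outputs.
     *     }
     */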
}

/** @} */