/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
15c5e268c6Sopenharmony_ci
/**
 * @addtogroup NNRt
 * @{
 *
 * @brief Provides a unified interface for AI chip drivers to access OpenHarmony.
 * Neural Network Runtime (NNRt) is a cross-chip inference computing runtime environment oriented to the AI field.
 * It serves as a bridge between the upper-layer AI inference framework and the underlying acceleration chip to
 * implement cross-chip inference computing of AI models.
 *
 * @since 3.2
 * @version 1.0
 */
26c5e268c6Sopenharmony_ci
/**
 * @file IPreparedModel.idl
 *
 * @brief Defines the APIs for exporting AI models and performing AI model inference.
 *
 * @since 3.2
 * @version 1.0
 */
35c5e268c6Sopenharmony_ci
/**
 * @brief Defines the package path of the NNRt module.
 *
 * @since 3.2
 * @version 1.0
 */
package ohos.hdi.nnrt.v1_0;

import ohos.hdi.nnrt.v1_0.NnrtTypes;
45c5e268c6Sopenharmony_ci
/**
 * @brief Provides the APIs for exporting AI models and performing AI model inference.
 *
 * An <b>IPreparedModel</b> instance represents a model that the driver has already prepared
 * (compiled), so it can be run directly or exported back to a cache for faster reloading.
 *
 * @since 3.2
 * @version 1.0
 */
interface IPreparedModel {
    /**
     * @brief Exports an AI model from the cache.
     *
     * The exported buffers can later be passed back to the driver to rebuild the prepared model
     * without recompiling it from the original model file.
     *
     * @param modelCache Indicates an array of shared buffers holding the exported model cache.
     * For details, see {@link SharedBuffer}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    ExportModelCache([out] struct SharedBuffer[] modelCache);

    /**
     * @brief Performs AI model inference.
     *
     * @param inputs Indicates the input data for AI model inference. The data is input in the
     * sequence defined by the model. For details about the input data type, see {@link IOTensor}.
     * @param outputs Indicates the output data of AI model inference. After inference, the output
     * data is written to the shared buffer. For details about the output data type, see {@link IOTensor}.
     * @param outputsDims Indicates the dimensions of the output data. The output sequence is the
     * same as that of <b>outputs</b>.
     * @param isOutputBufferEnough Indicates whether the shared buffer space is sufficient for the
     * output data, one flag per output. The value <b>true</b> means the shared buffer space is
     * sufficient; the value <b>false</b> means the opposite.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    Run([in] struct IOTensor[] inputs, [in] struct IOTensor[] outputs,
        [out] int[][] outputsDims, [out] boolean[] isOutputBufferEnough);
}

/** @} */
79