1/*
2 * Copyright (c) 2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 *     http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16/**
17 * @addtogroup NNRt
18 * @{
19 *
 * @brief Provides a unified interface for AI chip drivers to access OpenHarmony.
 * Neural Network Runtime (NNRt) is a cross-chip inference computing runtime environment oriented to the AI field.
 * It serves as a bridge between the upper-layer AI inference framework and the underlying acceleration chips, implementing cross-chip inference computing of AI models.
23 * @since 3.2
24 * @version 1.0
25 */
26
27/**
28 * @file INnrtDevice.idl
29 *
30 * @brief Defines methods related to chip devices.
31 *
32 * You can use the methods to query chip device information and build AI models.
33 *
34 * @since 3.2
35 * @version 1.0
36 */
37
38/**
39 * @brief Defines the package path of the NNRt module.
40 *
41 * @since 3.2
42 * @version 1.0
43 */
44package ohos.hdi.nnrt.v1_0;
45
46import ohos.hdi.nnrt.v1_0.NnrtTypes;
47import ohos.hdi.nnrt.v1_0.ModelTypes;
48import ohos.hdi.nnrt.v1_0.IPreparedModel;
49
50/**
51 * @brief Provides methods for device management and model building.
52 *
53 * When multiple devices are registered, ensure that the combination of the device name and vendor name is globally unique.
54 *
55 * @since 3.2
56 * @version 1.0
57 */
interface INnrtDevice {
    /**
     * @brief Obtains the device name.
     *
     * @param name Indicates the device name obtained.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    GetDeviceName([out] String name);

    /**
     * @brief Obtains the device vendor name.
     *
     * @param name Indicates the device vendor name obtained.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    GetVendorName([out] String name);

    /**
     * @brief Obtains the device type.
     *
     * @param deviceType Indicates the device type obtained. For details, see {@link DeviceType}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    GetDeviceType([out] enum DeviceType deviceType);

    /**
     * @brief Obtains the device status.
     *
     * @param status Indicates the device status obtained. For details, see {@link DeviceStatus}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    GetDeviceStatus([out] enum DeviceStatus status);

    /**
     * @brief Obtains the device's support for the operators of the specified AI model.
     *
     * @param model Indicates the AI model. For details, see {@link Model}.
     * @param ops Indicates whether each operator is supported by the device. The flags are listed in the
     *            same sequence as the operators are listed in the AI model.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    GetSupportedOperation([in] struct Model model, [out] boolean[] ops);

    /**
     * @brief Checks whether the device supports the Float32 model with the Float16 precision.
     *
     * @param isSupported Indicates whether the Float16 precision is supported.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    IsFloat16PrecisionSupported([out] boolean isSupported);

    /**
     * @brief Checks whether the device supports performance preference settings. For details about the performance preference, see {@link PerformanceMode}.
     *
     * @param isSupported Indicates whether the device supports performance preference settings.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    IsPerformanceModeSupported([out] boolean isSupported);

    /**
     * @brief Checks whether the device supports task priority settings. For details about the priority, see {@link Priority}.
     *
     * @param isSupported Indicates whether the device supports task priority settings.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    IsPrioritySupported([out] boolean isSupported);

    /**
     * @brief Checks whether the device supports dynamic input, which allows inputs of different shapes to be used for different operations.
     *
     * If dynamic input is supported, <b>-1</b> is added in the shape of the input tensor.
     *
     * @param isSupported Indicates whether dynamic input is supported.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    IsDynamicInputSupported([out] boolean isSupported);

    /**
     * @brief Builds a model.
     *
     * If the AI model supports dynamic input, at least one dimension of the input tensor contains <b>-1</b>.
     *
     * @param model Indicates the model to build. For details, see {@link Model}.
     * @param config Indicates the model configuration. For details, see {@link ModelConfig}.
     * @param preparedModel Indicates the model object built. For details, see {@link IPreparedModel}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    PrepareModel([in] struct Model model, [in] struct ModelConfig config, [out] IPreparedModel preparedModel);

    /**
     * @brief Checks whether the device supports caching of the AI models built.
     *
     * If yes, <b>PrepareModelFromModelCache()</b> and <b>ExportModelCache()</b> need to be implemented.
     *
     * @param isSupported Indicates whether the device supports caching of the AI models built.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    IsModelCacheSupported([out] boolean isSupported);

    /**
     * @brief Loads an AI model from the cache. The AI model is exported by using <b>ExportModelCache()</b>.
     *
     * @param modelCache Indicates an array of the model files, which are in the same sequence as they were exported. For details, see {@link SharedBuffer}.
     * @param config Indicates the configuration for loading the model. For details, see {@link ModelConfig}.
     * @param preparedModel Indicates the model object obtained. For details, see {@link IPreparedModel}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    PrepareModelFromModelCache([in] struct SharedBuffer[] modelCache, [in] struct ModelConfig config,
                               [out] IPreparedModel preparedModel);

    /**
     * @brief Allocates the shared memory for the device. The shared memory allows quick access to the input and output data for AI inference.
     *
     * @param length Indicates the size of the shared memory to allocate, in bytes.
     * @param buffer Indicates the information about the shared memory allocated, including the file descriptor and size of the shared memory. For details, see {@link SharedBuffer}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    AllocateBuffer([in] unsigned int length, [out] struct SharedBuffer buffer);

    /**
     * @brief Releases the shared memory.
     *
     * @param buffer Indicates the shared memory to release. For details, see {@link SharedBuffer}.
     *
     * @return Returns <b>0</b> if the operation is successful.
     * @return Returns a negative number if the operation fails.
     */
    ReleaseBuffer([in] struct SharedBuffer buffer);
}
212
213/** @} */
214