/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <string>
#include <vector>
#include <gtest/gtest.h>

#include "interfaces/kits/c/neural_network_runtime/neural_network_runtime.h"
#include "common/log.h"
#include "nncore_utils.h"

using namespace testing::ext;
using namespace OHOS::NeuralNetworkRuntime::Test;

class EndToEndTest : public testing::Test {
public:
    void SetUp()
    {
        CreateFolder(CACHE_DIR);
    }
    void TearDown()
    {
        DeleteFolder(CACHE_DIR);
    }
};

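// Builds a fixed-shape single-operator Add model and asserts that graph construction succeeds.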
void BuildModel(OH_NNModel **model)
{
    *model = OH_NNModel_Construct();
    ASSERT_NE(nullptr, *model);
    AddModel addModel;
    OHNNGraphArgs graphArgs = addModel.graphArgs;
    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(*model, graphArgs));
}

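// Builds a single-operator average-pooling model with dynamic (variable-length) input shapes.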
void BuildDynamicModel(OH_NNModel **model)
{
    *model = OH_NNModel_Construct();
    ASSERT_NE(nullptr, *model);
    AvgPoolDynamicModel avgModel;
    OHNNGraphArgs graphArgs = avgModel.graphArgs;
    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraph(*model, graphArgs));
}

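// Builds a fixed-shape single-operator Add model whose tensors carry quantization parameters.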
void BuildModelWithQuantParams(OH_NNModel **model)
{
    *model = OH_NNModel_Construct();
    ASSERT_NE(nullptr, *model);
    AddModel addModel;
    OHNNGraphArgs graphArgs = addModel.graphArgs;
    ASSERT_EQ(OH_NN_SUCCESS, BuildSingleOpGraphWithQuantParams(*model, graphArgs));
}

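// Creates a compilation from the given model (or from the cache directory when model is nullptr),
// sets device, optional cache, performance mode, priority and float16 options, then builds it.
// Returns nullptr if any step fails.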
OH_NNCompilation* ConstructCompilation(OH_NNModel* model, size_t deviceId, bool isUseCache = true)
{
    OH_NNCompilation* compilation = nullptr;
    if (model == nullptr) {
        compilation = OH_NNCompilation_ConstructForCache();
        if (compilation == nullptr) {
            LOGE("End2EndTest::OH_NNCompilation_ConstructForCache failed.");
            return nullptr;
        }
    } else {
        compilation = OH_NNCompilation_Construct(model);
        if (compilation == nullptr) {
            LOGE("End2EndTest::OH_NNCompilation_Construct failed.");
            return nullptr;
        }
    }

    OH_NN_ReturnCode returnCode = OH_NNCompilation_SetDevice(compilation, deviceId);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("End2EndTest::OH_NNCompilation_SetDevice failed.");
        return nullptr;
    }

    if (isUseCache) {
        returnCode = OH_NNCompilation_SetCache(compilation, CACHE_DIR.c_str(), 1);
        if (returnCode != OH_NN_SUCCESS) {
            LOGE("End2EndTest::OH_NNCompilation_SetCache failed.");
            return nullptr;
        }
    }

    returnCode = OH_NNCompilation_SetPerformanceMode(compilation, OH_NN_PERFORMANCE_EXTREME);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("End2EndTest::OH_NNCompilation_SetPerformanceMode failed.");
        return nullptr;
    }

    returnCode = OH_NNCompilation_SetPriority(compilation, OH_NN_PRIORITY_HIGH);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("End2EndTest::OH_NNCompilation_SetPriority failed.");
        return nullptr;
    }

    returnCode = OH_NNCompilation_EnableFloat16(compilation, false);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("End2EndTest::OH_NNCompilation_EnableFloat16 failed.");
        return nullptr;
    }

    // Perform the build
    returnCode = OH_NNCompilation_Build(compilation);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("End2EndTest::OH_NNCompilation_Build failed.");
        return nullptr;
    }
    return compilation;
}

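// Fills every input tensor's data buffer with an ascending sequence (0, 1, 2, ...)
// according to the element count and data type (FLOAT32 or INT32) of its descriptor.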
OH_NN_ReturnCode SetInputData(NN_Tensor* inputTensor[], size_t inputSize)
{
    OH_NN_DataType dataType(OH_NN_FLOAT32);
    OH_NN_ReturnCode ret{OH_NN_FAILED};
    size_t elementNum = 0;
    for (size_t i = 0; i < inputSize; ++i) {
        auto data = OH_NNTensor_GetDataBuffer(inputTensor[i]);
        if (data == nullptr) {
            LOGE("End2EndTest::OH_NNTensor_GetDataBuffer failed.");
            return OH_NN_FAILED;
        }
        auto desc = OH_NNTensor_GetTensorDesc(inputTensor[i]);
        if (desc == nullptr) {
            LOGE("End2EndTest::OH_NNTensor_GetTensorDesc failed.");
            return OH_NN_FAILED;
        }
        ret = OH_NNTensorDesc_GetDataType(desc, &dataType);
        if (ret != OH_NN_SUCCESS) {
            LOGE("End2EndTest::OH_NNTensorDesc_GetDataType failed.");
            return ret;
        }
        ret = OH_NNTensorDesc_GetElementCount(desc, &elementNum);
        if (ret != OH_NN_SUCCESS) {
            LOGE("End2EndTest::OH_NNTensorDesc_GetElementCount failed.");
            return ret;
        }
        switch (dataType) {
            case OH_NN_FLOAT32: {
                float* floatValue = reinterpret_cast<float*>(data);
                for (size_t j = 0; j < elementNum; ++j) {
                    floatValue[j] = static_cast<float>(j);
                }
                break;
            }
            case OH_NN_INT32: {
                int* intValue = reinterpret_cast<int*>(data);
                for (size_t j = 0; j < elementNum; ++j) {
                    intValue[j] = static_cast<int>(j);
                }
                break;
            }
            default:
                return OH_NN_FAILED;
        }
    }
    return OH_NN_SUCCESS;
}
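
// Queries the executor for its input and output counts and creates a tensor description
// for every input and output.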
OH_NN_ReturnCode GetInputAndOutputTensorDesc(OH_NNExecutor *executor,
                                             size_t *inputCount, std::vector<NN_TensorDesc*>& inputTensorDescs,
                                             size_t *outputCount, std::vector<NN_TensorDesc*>& outputTensorDescs)
{
    OH_NN_ReturnCode returnCode = OH_NNExecutor_GetInputCount(executor, inputCount);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("End2EndTest::OH_NNExecutor_GetInputCount failed.");
        return returnCode;
    }
    NN_TensorDesc* tensorDescTmp = nullptr;
    for (size_t i = 0; i < *inputCount; ++i) {
        tensorDescTmp = OH_NNExecutor_CreateInputTensorDesc(executor, i);
        if (tensorDescTmp == nullptr) {
            LOGE("End2EndTest::OH_NNExecutor_CreateInputTensorDesc failed.");
            return OH_NN_FAILED;
        }
        inputTensorDescs.emplace_back(tensorDescTmp);
    }
    returnCode = OH_NNExecutor_GetOutputCount(executor, outputCount);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("End2EndTest::OH_NNExecutor_GetOutputCount failed.");
        return returnCode;
    }
    for (size_t i = 0; i < *outputCount; ++i) {
        tensorDescTmp = OH_NNExecutor_CreateOutputTensorDesc(executor, i);
        if (tensorDescTmp == nullptr) {
            LOGE("End2EndTest::OH_NNExecutor_CreateOutputTensorDesc failed.");
            return OH_NN_FAILED;
        }
        outputTensorDescs.emplace_back(tensorDescTmp);
    }

    return returnCode;
}

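// For a dynamic-shape model, sets each input shape to the minimum of its supported
// dimension range and fixes every output shape to {1, 2, 2, 1}; does nothing for
// fixed-shape models.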
OH_NN_ReturnCode GetInputDimAndSetShape(OH_NNExecutor *executor, std::vector<NN_TensorDesc*>& inputTensorDescs,
                                        std::vector<NN_TensorDesc*>& outputTensorDescs, bool isDynamic)
{
    if (isDynamic) {
        size_t *minInputDims = nullptr;
        size_t *maxInputDims = nullptr;
        size_t shapeLength = ZERO;
        for (size_t i = 0; i < inputTensorDescs.size(); ++i) {
            if (OH_NN_SUCCESS != OH_NNExecutor_GetInputDimRange(executor, i, &minInputDims,
                                                                &maxInputDims, &shapeLength)) {
                LOGE("End2EndTest::OH_NNExecutor_GetInputDimRange failed.");
                return OH_NN_FAILED;
            }
            std::vector<int32_t> minInputDimsT;
            for (size_t j = 0; j < shapeLength; ++j) {
                minInputDimsT.emplace_back(static_cast<int32_t>(minInputDims[j]));
            }
            if (OH_NN_SUCCESS != OH_NNTensorDesc_SetShape(inputTensorDescs[i], minInputDimsT.data(), shapeLength)) {
                LOGE("End2EndTest::OH_NNTensorDesc_SetShape failed.");
                return OH_NN_FAILED;
            }
        }
        std::vector<int32_t> outputShape{1, 2, 2, 1};
        for (size_t i = 0; i < outputTensorDescs.size(); ++i) {
            if (OH_NN_SUCCESS != OH_NNTensorDesc_SetShape(outputTensorDescs[i],
                                                          outputShape.data(), outputShape.size())) {
                LOGE("End2EndTest::OH_NNTensorDesc_SetShape failed.");
                return OH_NN_FAILED;
            }
        }
    }
    return OH_NN_SUCCESS;
}

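// Creates one device tensor for each tensor description, then destroys the descriptions.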
OH_NN_ReturnCode CreateTensorAndDestroyTensorDesc(NN_Tensor* tensors[], size_t count,
                                                  std::vector<NN_TensorDesc*>& tensorDescs, size_t deviceId)
{
    NN_Tensor* tensor = nullptr;
    for (size_t i = 0; i < count; ++i) {
        tensor = OH_NNTensor_Create(deviceId, tensorDescs[i]);
        if (tensor == nullptr) {
            LOGE("End2EndTest::OH_NNTensor_Create failed.");
            return OH_NN_FAILED;
        }
        tensors[i] = tensor;
    }
    for (size_t i = 0; i < count; ++i) {
        if (OH_NN_SUCCESS != OH_NNTensorDesc_Destroy(&tensorDescs[i])) {
            LOGE("End2EndTest::OH_NNTensorDesc_Destroy failed.");
            return OH_NN_FAILED;
        }
    }
    return OH_NN_SUCCESS;
}

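// Releases all input and output tensors created for a single inference run.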
OH_NN_ReturnCode DestroyInputAndOutputTensor(NN_Tensor** inputTensors, size_t inputCount,
                                             NN_Tensor** outputTensors, size_t outputCount)
{
    OH_NN_ReturnCode returnCode = OH_NN_SUCCESS;
    for (size_t i = 0; i < inputCount; ++i) {
        returnCode = OH_NNTensor_Destroy(&inputTensors[i]);
        if (returnCode != OH_NN_SUCCESS) {
            LOGE("End2EndTest::OH_NNTensor_Destroy failed.");
            return OH_NN_FAILED;
        }
    }
    for (size_t i = 0; i < outputCount; ++i) {
        returnCode = OH_NNTensor_Destroy(&outputTensors[i]);
        if (returnCode != OH_NN_SUCCESS) {
            LOGE("End2EndTest::OH_NNTensor_Destroy failed.");
            return OH_NN_FAILED;
        }
    }
    return OH_NN_SUCCESS;
}

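// Constructs an executor from the compilation, prepares input/output tensor descriptions and
// tensors, fills the inputs, runs a synchronous inference and releases the tensors.
// Returns the executor on success, or nullptr on any failure.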
OH_NNExecutor* RunExecutor(OH_NNCompilation* compilation, size_t deviceId, bool isDynamic = false)
{
    OH_NNExecutor *executor = OH_NNExecutor_Construct(compilation);
    if (executor == nullptr) {
        LOGE("End2EndTest::OH_NNExecutor_Construct failed.");
        return nullptr;
    }
    size_t inputCount = 0;
    std::vector<NN_TensorDesc*> inputTensorDescs;
    size_t outputCount = 0;
    std::vector<NN_TensorDesc*> outputTensorDescs;
    OH_NN_ReturnCode returnCode = GetInputAndOutputTensorDesc(executor, &inputCount, inputTensorDescs,
                                                              &outputCount, outputTensorDescs);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("End2EndTest::GetInputAndOutputTensorDesc failed.");
        return nullptr;
    }
    returnCode = GetInputDimAndSetShape(executor, inputTensorDescs, outputTensorDescs, isDynamic);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("End2EndTest::GetInputDimAndSetShape failed.");
        return nullptr;
    }
    NN_Tensor* inputTensors[inputCount];
    OH_NN_ReturnCode returnCodeTmp = CreateTensorAndDestroyTensorDesc(inputTensors, inputCount,
                                                                      inputTensorDescs, deviceId);
    NN_Tensor* outputTensors[outputCount];
    returnCode = CreateTensorAndDestroyTensorDesc(outputTensors, outputCount, outputTensorDescs, deviceId);
    if (returnCode != OH_NN_SUCCESS || returnCodeTmp != OH_NN_SUCCESS) {
        LOGE("End2EndTest::Tensors create failed.");
        return nullptr;
    }
    returnCode = SetInputData(inputTensors, inputCount);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("End2EndTest::SetInputData failed.");
        return nullptr;
    }
    returnCode = OH_NNExecutor_RunSync(executor, inputTensors, inputCount, outputTensors, outputCount);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("End2EndTest::OH_NNExecutor_RunSync failed.");
        return nullptr;
    }
    returnCode = DestroyInputAndOutputTensor(inputTensors, inputCount, outputTensors, outputCount);
    if (returnCode != OH_NN_SUCCESS) {
        LOGE("End2EndTest::DestroyInputAndOutputTensor failed.");
        return nullptr;
    }
    return executor;
}

/*
* @tc.name:sub_AI_NNRt_Core_Func_North_EndToEnd_0100
* @tc.desc:End-to-end test of fixed-shape model compilation
* @tc.type:FUNC
*/
HWTEST_F(EndToEndTest, sub_AI_NNRt_Core_Func_North_EndToEnd_0100, Function | MediumTest | Level1)
{
    size_t deviceId = 0;
    ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId));

    OH_NNModel* model = nullptr;
    BuildModel(&model);

    OH_NNCompilation* compilation = ConstructCompilation(model, deviceId);
    ASSERT_NE(nullptr, compilation);

    OH_NNModel_Destroy(&model);
    OH_NNExecutor* executor = RunExecutor(compilation, deviceId);
    ASSERT_NE(nullptr, executor);
    OH_NNCompilation_Destroy(&compilation);
    OH_NNExecutor_Destroy(&executor);
}

/*
* @tc.name:sub_AI_NNRt_Core_Func_North_EndToEnd_0200
* @tc.desc:End-to-end test of dynamic-shape model compilation
* @tc.type:FUNC
*/
HWTEST_F(EndToEndTest, sub_AI_NNRt_Core_Func_North_EndToEnd_0200, Function | MediumTest | Level1)
{
    size_t deviceId = 0;
    ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId));
    OH_NNModel* model = nullptr;
    BuildDynamicModel(&model);

    OH_NNCompilation* compilation = ConstructCompilation(model, deviceId);
    ASSERT_NE(nullptr, compilation);
    OH_NNModel_Destroy(&model);
    OH_NNExecutor* executor = RunExecutor(compilation, deviceId, true);
    ASSERT_NE(nullptr, executor);
    OH_NNCompilation_Destroy(&compilation);
    OH_NNExecutor_Destroy(&executor);
}

/*
* @tc.name:sub_AI_NNRt_Core_Func_North_EndToEnd_0300
* @tc.desc:End-to-end test of fixed-shape model compilation with quantization parameters
* @tc.type:FUNC
*/
HWTEST_F(EndToEndTest, sub_AI_NNRt_Core_Func_North_EndToEnd_0300, Function | MediumTest | Level1)
{
    size_t deviceId = 0;
    ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId));

    OH_NNModel* model = nullptr;
    BuildModelWithQuantParams(&model);

    OH_NNCompilation* compilation = ConstructCompilation(model, deviceId);
    ASSERT_NE(nullptr, compilation);
    OH_NNModel_Destroy(&model);
    OH_NNExecutor* executor = RunExecutor(compilation, deviceId);
    ASSERT_NE(nullptr, executor);
    OH_NNCompilation_Destroy(&compilation);
    OH_NNExecutor_Destroy(&executor);
}

/*
* @tc.name:sub_AI_NNRt_Core_Func_North_Reliability_0100
* @tc.desc:Long-running stability test of fixed-shape model compilation
* @tc.type:FUNC
*/
HWTEST_F(EndToEndTest, sub_AI_NNRt_Core_Func_North_Reliability_0100, Reliability | MediumTest | Level2)
{
    size_t deviceId = 0;
    ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId));

    OH_NNModel* model = nullptr;
    BuildModel(&model);
    for (int i = 0; i < STRESS_COUNT; i++) {
        OH_NNCompilation* compilation = ConstructCompilation(model, deviceId);
        ASSERT_NE(nullptr, compilation);
        OH_NNExecutor* executor = RunExecutor(compilation, deviceId);
        ASSERT_NE(nullptr, executor);
        OH_NNCompilation_Destroy(&compilation);
        OH_NNExecutor_Destroy(&executor);
        if (i % PRINT_FREQ == 0) {
            printf("[NnrtTest] Reliability_test_001 times: %d/%d\n", i, STRESS_COUNT);
        }
    }
    OH_NNModel_Destroy(&model);
}

/*
* @tc.name:sub_AI_NNRt_Core_Func_North_Reliability_0200
* @tc.desc:Long-running stability test of dynamic-shape model compilation
* @tc.type:FUNC
*/
HWTEST_F(EndToEndTest, sub_AI_NNRt_Core_Func_North_Reliability_0200, Reliability | MediumTest | Level2)
{
    size_t deviceId = 0;
    ASSERT_EQ(OH_NN_SUCCESS, GetDeviceID(&deviceId));

    OH_NNModel* model = nullptr;
    BuildDynamicModel(&model);
    for (int i = 0; i < STRESS_COUNT; i++) {
        OH_NNCompilation* compilation = ConstructCompilation(model, deviceId, false);
        ASSERT_NE(nullptr, compilation);
        OH_NNExecutor* executor = RunExecutor(compilation, deviceId, true);
        ASSERT_NE(nullptr, executor);
        OH_NNCompilation_Destroy(&compilation);
        OH_NNExecutor_Destroy(&executor);
        if (i % PRINT_FREQ == 0) {
            printf("[NnrtTest] Reliability_test_002 times: %d/%d\n", i, STRESS_COUNT);
        }
    }
    OH_NNModel_Destroy(&model);
}