/foundation/ai/neural_network_runtime/test/unittest/ops/

ops_test.cpp
    39   std::shared_ptr<NNTensor> outputTensor;    in SaveOutputTensor() (local)
    40   outputTensor = TransToNNTensor(dataType, dim, quantParam, OH_NN_TENSOR);    in SaveOutputTensor()
    41   m_allTensors.emplace_back(outputTensor);    in SaveOutputTensor()

lstm_test.cpp
    209  std::shared_ptr<NNTensor> outputTensor;    in SetOutputTensor() (local)
    210  outputTensor = TransToNNTensor(OH_NN_FLOAT32, m_outputDim, nullptr, OH_NN_TENSOR);    in SetOutputTensor()
    211  m_allTensors.emplace_back(outputTensor);    in SetOutputTensor()

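Both test fixtures above follow the same pattern: build the output tensor through the fixture's TransToNNTensor() helper and append the resulting shared_ptr to m_allTensors so the tensor stays alive for the whole test case. The sketch below reproduces only that ownership pattern; NNTensorStub and MakeTensor() are simplified stand-ins for the repository's NNTensor class and TransToNNTensor() helper, not the real code.

    #include <memory>
    #include <vector>

    struct NNTensorStub {};  // stand-in for the runtime's internal NNTensor class

    class OpsTestSketch {
    public:
        // Mirrors SaveOutputTensor()/SetOutputTensor(): create the output tensor,
        // then keep it alive in m_allTensors for the remainder of the test case.
        void SaveOutputTensor()
        {
            std::shared_ptr<NNTensorStub> outputTensor = MakeTensor();
            m_allTensors.emplace_back(outputTensor);
        }

    private:
        // Stand-in for TransToNNTensor(dataType, dim, quantParam, OH_NN_TENSOR).
        std::shared_ptr<NNTensorStub> MakeTensor()
        {
            return std::make_shared<NNTensorStub>();
        }

        std::vector<std::shared_ptr<NNTensorStub>> m_allTensors;
    };
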
/foundation/ai/neural_network_runtime/test/unittest/components/v1_0/neural_network_core_test/

neural_network_core_test.cpp
    2015  NN_Tensor* outputTensor[] = {nullptr};    in HWTEST_F() (local)
    2017  OH_NN_ReturnCode ret = OH_NNExecutor_RunSync(executor, inputTensor, inputCount, outputTensor, outputcount);    in HWTEST_F()
    2039  NN_Tensor* outputTensor[] = {nullptr};    in HWTEST_F() (local)
    2041  OH_NN_ReturnCode ret = OH_NNExecutor_RunSync(nnExecutor, inputTensor, inputCount, outputTensor, outputcount);    in HWTEST_F()
    2064  NN_Tensor* outputTensor[] = {nullptr};    in HWTEST_F() (local)
    2066  OH_NN_ReturnCode ret = OH_NNExecutor_RunSync(nnExecutor, inputTensor, inputCount, outputTensor, outputcount);    in HWTEST_F()
    2089  NN_Tensor* outputTensor[] = {nullptr};    in HWTEST_F() (local)
    2091  OH_NN_ReturnCode ret = OH_NNExecutor_RunSync(nnExecutor, inputTensor, inputCount, outputTensor, outputcount);    in HWTEST_F()
    2114  NN_Tensor* outputTensor[sizetensor];    in HWTEST_F() (local)
    2116  OH_NN_ReturnCode ret = OH_NNExecutor_RunSync(nnExecutor, inputTensor, inputCount, outputTensor, outputcoun…    in HWTEST_F()
    2131  NN_Tensor* outputTensor[] = {nullptr};    in HWTEST_F() (local)
    2158  NN_Tensor* outputTensor[] = {nullptr};    in HWTEST_F() (local)
    2186  NN_Tensor* outputTensor[] = {nullptr};    in HWTEST_F() (local)
    2214  NN_Tensor* outputTensor[] = {nullptr};    in HWTEST_F() (local)
    2242  NN_Tensor* outputTensor[] = {nullptr};    in HWTEST_F() (local)
    2270  NN_Tensor* outputTensor[sizetensor];    in HWTEST_F() (local)
    2298  NN_Tensor* outputTensor[sizetensor];    in HWTEST_F() (local)
    [all ...]

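The hits above come from negative tests that hand OH_NNExecutor_RunSync empty or null tensor arrays. A hedged sketch of that kind of test is shown below, written with plain gtest rather than the repository's HWTEST_F wrapper; the include path and the exact error code returned for a null executor are assumptions, so the assertion only checks that the call does not report success.

    #include <cstddef>
    #include <gtest/gtest.h>
    #include "neural_network_runtime/neural_network_core.h"  // include path assumed

    TEST(NeuralNetworkCoreSketch, RunSyncRejectsNullExecutor)
    {
        NN_Tensor* inputTensor[] = {nullptr};
        NN_Tensor* outputTensor[] = {nullptr};
        size_t inputCount = 1;
        size_t outputCount = 1;

        // A null executor must be rejected; the concrete error code is not
        // asserted because it is not visible in the listing above.
        OH_NN_ReturnCode ret = OH_NNExecutor_RunSync(nullptr, inputTensor, inputCount,
                                                     outputTensor, outputCount);
        EXPECT_NE(OH_NN_SUCCESS, ret);
    }
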
/foundation/ai/neural_network_runtime/interfaces/kits/c/neural_network_runtime/

neural_network_core.h
    1008  * @param outputTensor An array of output tensors {@link NN_Tensor}.
    1019  NN_Tensor *outputTensor[],
    1051  * @param outputTensor An array of output tensors {@link NN_Tensor}.
    1064  NN_Tensor *outputTensor[],

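Lines 1008-1064 belong to the declarations of the synchronous and asynchronous run entry points in neural_network_core.h. As a quick orientation, the sketch below shows the call shape of OH_NNExecutor_RunSync only; creating the executor and the NN_Tensor inputs/outputs is assumed to have happened through the usual model, compilation and tensor APIs, and the include path is an assumption.

    #include <cstddef>
    #include "neural_network_runtime/neural_network_core.h"  // include path assumed

    OH_NN_ReturnCode RunOnce(OH_NNExecutor* executor,
                             NN_Tensor* inputTensor[], size_t inputCount,
                             NN_Tensor* outputTensor[], size_t outputCount)
    {
        // Blocking call: returns only after inference has finished or failed.
        OH_NN_ReturnCode ret = OH_NNExecutor_RunSync(executor, inputTensor, inputCount,
                                                     outputTensor, outputCount);
        if (ret != OH_NN_SUCCESS) {
            // The contents of outputTensor are not meaningful on failure.
        }
        return ret;
    }
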
neural_network_runtime_type.h
    202  * The <b>outputTensor</b> and <b>outputCount</b> are the inference results, which is the same as ones passed to
    208  * @param outputTensor An array of output tensors {@link NN_Tensor} of the model, which is the same as the argument
    209  * <b>outputTensor</b> passed to {@link OH_NNExecutor_RunAsync}.
    215  typedef void (*NN_OnRunDone)(void *userData, OH_NN_ReturnCode errCode, void *outputTensor[], int32_t outputCount);

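The typedef at line 215 defines the completion callback for asynchronous runs; per the doc comment, the outputTensor array delivered here is the same one that was passed to OH_NNExecutor_RunAsync. A minimal callback sketch follows; how the callback is registered on the executor is not shown, and the include path is an assumption.

    #include <cstdint>
    #include "neural_network_runtime/neural_network_runtime_type.h"  // include path assumed

    static void OnRunDone(void* userData, OH_NN_ReturnCode errCode,
                          void* outputTensor[], int32_t outputCount)
    {
        (void)userData;
        if (errCode != OH_NN_SUCCESS) {
            return;  // inference failed; leave the output tensors untouched
        }
        for (int32_t i = 0; i < outputCount; ++i) {
            // Each entry is the same tensor pointer that was passed to
            // OH_NNExecutor_RunAsync, delivered back as void*.
            void* tensor = outputTensor[i];
            (void)tensor;  // read the results through the tensor/data APIs here
        }
    }
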
/foundation/ai/neural_network_runtime/example/deep_learning_framework/tflite/tools/

utils.cpp
    142  TfLiteTensor* outputTensor = interpreter->tensor(output_index);    in PrintResult() (local)
    143  switch (outputTensor->type) {    in PrintResult()
    157  LOG(ERROR) << "Unsupportted tensor datatype: " << outputTensor->type << "!";    in PrintResult()

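PrintResult() in the TFLite example dispatches on the output tensor's element type before printing. The sketch below reproduces that dispatch with the public TensorFlow Lite C++ API; the printing itself and the full set of handled types are elided.

    #include "tensorflow/lite/interpreter.h"

    void PrintOutputTypeSketch(const tflite::Interpreter& interpreter, int output_index)
    {
        const TfLiteTensor* outputTensor = interpreter.tensor(output_index);
        switch (outputTensor->type) {
            case kTfLiteFloat32:
                // float results live in outputTensor->data.f
                break;
            case kTfLiteUInt8:
                // quantized results live in outputTensor->data.uint8
                break;
            default:
                // mirrors the "Unsupportted tensor datatype" error branch in utils.cpp
                break;
        }
    }
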
/foundation/ai/neural_network_runtime/frameworks/native/neural_network_core/

neural_network_core.cpp
    1629  NN_Tensor *outputTensor[],    in RunSync()
    1644  OH_NN_ReturnCode ret = executor->RunSync(inputTensor, inputCount, outputTensor, outputCount);    in RunSync()
    1676  NN_Tensor *outputTensor[],    in OH_NNExecutor_RunSync()
    1691  if (outputTensor == nullptr) {    in OH_NNExecutor_RunSync()
    1692  LOGE("OH_NNExecutor_RunSync failed, outputTensor is nullptr.");    in OH_NNExecutor_RunSync()
    1701  return RunSync(executorImpl, inputTensor, inputCount, outputTensor, outputCount);    in OH_NNExecutor_RunSync()
    1707  NN_Tensor* outputTensor[],    in OH_NNExecutor_RunAsync()
    1724  if (outputTensor == nullptr) {    in OH_NNExecutor_RunAsync()
    1725  LOGE("OH_NNExecutor_RunAsync failed, outputTensor is nullptr.");    in OH_NNExecutor_RunAsync()
    1738  return executorImpl->RunAsync(inputTensor, inputCount, outputTensor, outputCoun…    in OH_NNExecutor_RunAsync()
    1626  RunSync(Executor *executor, NN_Tensor *inputTensor[], size_t inputCount, NN_Tensor *outputTensor[], size_t outputCount)    in RunSync() (argument)
    1673  OH_NNExecutor_RunSync(OH_NNExecutor *executor, NN_Tensor *inputTensor[], size_t inputCount, NN_Tensor *outputTensor[], size_t outputCount)    in OH_NNExecutor_RunSync() (argument)
    1704  OH_NNExecutor_RunAsync(OH_NNExecutor *executor, NN_Tensor* inputTensor[], size_t inputCount, NN_Tensor* outputTensor[], size_t outputCount, int32_t timeout, void* userData)    in OH_NNExecutor_RunAsync() (argument)
    [all ...]

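The implementation hits above show both entry points validating every pointer argument before delegating to the executor, and they expose the full OH_NNExecutor_RunAsync signature (executor, inputs, outputs, timeout, userData). A call-shape sketch of the asynchronous path follows; registering the NN_OnRunDone callback that eventually receives outputTensor is done elsewhere and not shown, and the include path is an assumption.

    #include <cstddef>
    #include <cstdint>
    #include "neural_network_runtime/neural_network_core.h"  // include path assumed

    OH_NN_ReturnCode RunAsyncOnce(OH_NNExecutor* executor,
                                  NN_Tensor* inputTensor[], size_t inputCount,
                                  NN_Tensor* outputTensor[], size_t outputCount,
                                  int32_t timeout, void* userData)
    {
        // Returns immediately; the results arrive through the registered NN_OnRunDone
        // callback together with the same outputTensor array and userData.
        return OH_NNExecutor_RunAsync(executor, inputTensor, inputCount,
                                      outputTensor, outputCount, timeout, userData);
    }
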
/foundation/ai/neural_network_runtime/frameworks/native/neural_network_runtime/

inner_model.cpp
    736  for (auto outputTensor : m_outputTensors) {    in GetOutputTensorDescs()
    738  outputTensor->ConvertToTensorDesc(*(tensorDescPair.first.get()));    in GetOutputTensorDescs()
    739  tensorDescPair.second = outputTensor->GetType();    in GetOutputTensorDescs()

/foundation/ai/neural_network_runtime/test/unittest/components/v2_0/hdi_prepared_model/

hdi_prepared_model_test.cpp
    253  IOTensor outputTensor;    in HWTEST_F() (local)
    254  outputTensor.dataType = OH_NN_INT8;    in HWTEST_F()

/foundation/ai/neural_network_runtime/test/unittest/components/v1_0/hdi_prepared_model/

hdi_prepared_model_test.cpp
    256  IOTensor outputTensor;    in HWTEST_F() (local)
    257  outputTensor.dataType = OH_NN_INT8;    in HWTEST_F()

/foundation/ai/neural_network_runtime/test/unittest/components/v2_1/hdi_prepared_model/

hdi_prepared_model_test.cpp
    254  IOTensor outputTensor;    in HWTEST_F() (local)
    255  outputTensor.dataType = OH_NN_INT8;    in HWTEST_F()

/foundation/ai/neural_network_runtime/test/unittest/components/nn_executor/

nn_executor_test.cpp
    780  void MyOnRunDone(void *userData, OH_NN_ReturnCode errCode, void *outputTensor[], int32_t outputCount)    in MyOnRunDone() (argument)
    788  // Use outputTensor[] and outputCount to process the successful result    in MyOnRunDone()
    789  // For example, outputTensor may point to the memory holding the neural network's output data    in MyOnRunDone()