/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <cerrno>
#include <cstring>
#include <string>
#include <sys/mman.h>
#include <unistd.h>

#include "securec.h"

#include "nncore_const.h"
#include "mock_idevice.h"
#include "hdi_device_v2_0.h"
#include "hdi_returncode_utils.h"
#include "common/log.h"
#include "common/utils.h"
#include "nnbackend.h"
#include "backend_registrar.h"

namespace OHOS {
namespace NeuralNetworkRuntime {
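// Logs an HDI call failure: return codes below NNRT_SUCCESS are treated as raw HDI errors,
// anything else is interpreted as an NNRT_ReturnCode and converted to a readable string.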
void PrintRetLog(int32_t ret, int32_t nnrtSuccess, const std::string& makeName)
{
    if (ret < nnrtSuccess) {
        LOGW("%s failed. An error occurred in HDI, errorcode is %{public}d.", makeName.c_str(), ret);
    } else {
        OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode nnrtRet = static_cast<OHOS::HDI::Nnrt::V2_0::NNRT_ReturnCode>(ret);
        LOGW("%s failed. Errorcode is %{public}s.", makeName.c_str(), ConverterRetToString(nnrtRet).c_str());
    }
}

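// Creator for the HDI V2_0 backend: queries the device name, vendor name and HDI version,
// wraps the HDI device in HDIDeviceV2_0, and exposes it as an NNBackend whose id is the hash
// of the generated unique name.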
std::shared_ptr<Backend> HDIDeviceV2_0Creator()
{
    std::string deviceName;
    std::string vendorName;
    std::string version;

    // Only one device is available from the HDI service for now.
    OHOS::sptr<V2_0::INnrtDevice> iDevice = V2_0::INnrtDevice::Get();
    if (iDevice == nullptr) {
        LOGW("Get HDI device failed.");
        return nullptr;
    }

    auto ret = iDevice->GetDeviceName(deviceName);
    int32_t nnrtSuccess = static_cast<int32_t>(V2_0::NNRT_ReturnCode::NNRT_SUCCESS);
    if (ret != nnrtSuccess) {
        std::string makeName = "Get device name";
        PrintRetLog(ret, nnrtSuccess, makeName);
        return nullptr;
    }

    ret = iDevice->GetVendorName(vendorName);
    if (ret != nnrtSuccess) {
        std::string makeName = "Get vendor name";
        PrintRetLog(ret, nnrtSuccess, makeName);
        return nullptr;
    }

    std::pair<uint32_t, uint32_t> hdiVersion;
    ret = iDevice->GetVersion(hdiVersion.first, hdiVersion.second);
    if (ret != nnrtSuccess) {
        std::string makeName = "Get version";
        PrintRetLog(ret, nnrtSuccess, makeName);
        return nullptr;
    }
    version = 'v' + std::to_string(hdiVersion.first) + '_' + std::to_string(hdiVersion.second);
    const std::string backendName = GenUniqueName(deviceName, vendorName, version);

    std::shared_ptr<Device> device = CreateSharedPtr<HDIDeviceV2_0>(iDevice);
    if (device == nullptr) {
        LOGW("Failed to create device, because creating the device instance failed.");
        return nullptr;
    }

    std::shared_ptr<Backend> backend = std::make_shared<NNBackend>(device, std::hash<std::string>{}(backendName));
    if (backend == nullptr) {
        LOGW("Failed to register backend, because creating the backend failed.");
    }
    return backend;
}

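// Registers HDIDeviceV2_0Creator with the backend registrar so the runtime can discover this
// backend at startup.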
REGISTER_BACKEND(HDIDeviceV2_0, HDIDeviceV2_0Creator)
} // namespace NeuralNetworkRuntime
} // namespace OHOS

namespace OHOS {
namespace HDI {
namespace Nnrt {
namespace V2_0 {

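// Test doubles for the NNRt HDI: INnrtDevice::Get() is overridden here so that callers obtain
// the MockIDevice singleton instead of connecting to a real HDI service.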
sptr<INnrtDevice> INnrtDevice::Get(bool isStub)
{
    return INnrtDevice::Get("mock_device_service", isStub);
}

sptr<INnrtDevice> INnrtDevice::Get(const std::string &serviceName, bool isStub)
{
    if (isStub) {
        return nullptr;
    }
    sptr<INnrtDevice> mockIDevice = sptr<MockIDevice>(MockIDevice::GetInstance());
    return mockIDevice;
}

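// The mock device and mock prepared model track every ashmem fd they hand out and close the
// remaining ones on destruction.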
MockIDevice::~MockIDevice()
{
    for (auto fd : m_fds) {
        close(fd);
    }
}

MockIDevice::MockIDevice()
{
    m_bufferFd = 0;
}

MockIPreparedModel::~MockIPreparedModel()
{
    for (auto fd : m_fds) {
        close(fd);
    }
}

MockIDevice *MockIDevice::GetInstance()
{
    static MockIDevice iDevice;
    return &iDevice;
}

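// Test hooks: the setters below let test cases configure which capabilities the mock reports
// through the Is*Supported and GetSupportedOperation queries further down.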
void MockIDevice::SetFP16Supported(bool isSupported)
{
    m_fp16 = isSupported;
}

void MockIDevice::SetPerformanceSupported(bool isSupported)
{
    m_performance = isSupported;
}

void MockIDevice::SetPrioritySupported(bool isSupported)
{
    m_priority = isSupported;
}

void MockIDevice::SetModelCacheSupported(bool isSupported)
{
    m_cache = isSupported;
}

void MockIDevice::SetOperationsSupported(std::vector<bool> isSupported)
{
    m_operations = isSupported;
}

void MockIDevice::SetDynamicInputSupported(bool isSupported)
{
    m_dynamic = isSupported;
}

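// Device identity and capability queries: the identity values are fixed, while the capability
// answers come from whatever the test configured through the setters above.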
int32_t MockIDevice::GetDeviceName(std::string& name)
{
    name = "Device-CPU";
    return HDF_SUCCESS;
}

int32_t MockIDevice::GetVendorName(std::string& name)
{
    name = "TestVendor";
    return HDF_SUCCESS;
}

int32_t MockIDevice::GetDeviceType(DeviceType& deviceType)
{
    deviceType = DeviceType::CPU;
    return HDF_SUCCESS;
}

int32_t MockIDevice::GetDeviceStatus(DeviceStatus& status)
{
    status = DeviceStatus::AVAILABLE;
    return HDF_SUCCESS;
}

int32_t MockIDevice::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion)
{
    uint32_t twoNum = 2;
    majorVersion = twoNum;
    minorVersion = 0;
    return HDF_SUCCESS;
}

int32_t MockIDevice::GetSupportedOperation(const Model& model, std::vector<bool>& ops)
{
    ops = m_operations;
    return HDF_SUCCESS;
}

int32_t MockIDevice::IsFloat16PrecisionSupported(bool& isSupported)
{
    isSupported = m_fp16;
    return HDF_SUCCESS;
}

int32_t MockIDevice::IsPerformanceModeSupported(bool& isSupported)
{
    isSupported = m_performance;
    return HDF_SUCCESS;
}

int32_t MockIDevice::IsPrioritySupported(bool& isSupported)
{
    isSupported = m_priority;
    return HDF_SUCCESS;
}

int32_t MockIDevice::IsDynamicInputSupported(bool& isSupported)
{
    isSupported = m_dynamic;
    return HDF_SUCCESS;
}

int32_t MockIDevice::IsModelCacheSupported(bool& isSupported)
{
    isSupported = m_cache;
    return HDF_SUCCESS;
}

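// Shared-memory management: AllocateBuffer backs each request with an ashmem region and tracks
// its fd; ReleaseBuffer only accepts fds that were handed out this way.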
int32_t MockIDevice::AllocateBuffer(uint32_t length, SharedBuffer &buffer)
{
    std::lock_guard<std::mutex> lock(m_mtx);
    buffer.fd = AshmemCreate("allocateBuffer", length);
    if (buffer.fd < 0) {
        LOGE("AllocateBuffer: create ashmem failed.");
        return HDF_FAILURE;
    }
    buffer.bufferSize = AshmemGetSize(buffer.fd);
    buffer.offset = 0;
    buffer.dataSize = length;

    AshmemSetProt(buffer.fd, PROT_READ | PROT_WRITE);
    m_fds.emplace(buffer.fd);
    m_bufferFd = buffer.fd;
    return HDF_SUCCESS;
}

int32_t MockIDevice::ReleaseBuffer(const SharedBuffer &buffer)
{
    if (m_fds.find(buffer.fd) == m_fds.end()) {
        LOGE("ReleaseBuffer:buffer fd is invalid. fd = %d", buffer.fd);
        return HDF_FAILURE;
    }
    if (close(buffer.fd) != 0) {
        LOGE("ReleaseBuffer:Close buffer fd failed. fd = %d", buffer.fd);
        return HDF_FAILURE;
    }
    // Stop tracking the fd so the destructor does not close it a second time.
    m_fds.erase(buffer.fd);
    return HDF_SUCCESS;
}

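// Model preparation is stubbed out: each variant simply hands back a fresh MockIPreparedModel
// without touching the model, the cache or the config.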
int32_t MockIDevice::PrepareModel(const Model& model, const ModelConfig& config, sptr<IPreparedModel>& preparedModel)
{
    preparedModel = new (std::nothrow) V2_0::MockIPreparedModel();
    return HDF_SUCCESS;
}

int32_t MockIDevice::PrepareOfflineModel(const std::vector<SharedBuffer>& offlineModels, const ModelConfig& config,
    sptr<OHOS::HDI::Nnrt::V2_0::IPreparedModel>& preparedModel)
{
    preparedModel = new (std::nothrow) V2_0::MockIPreparedModel();
    return V2_0::NNRT_ReturnCode::NNRT_SUCCESS;
}

int32_t MockIDevice::PrepareModelFromModelCache(const std::vector<SharedBuffer>& modelCache, const ModelConfig& config,
    sptr<IPreparedModel>& preparedModel)
{
    preparedModel = new (std::nothrow) V2_0::MockIPreparedModel();
    return HDF_SUCCESS;
}

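// Produces a small fake cache entry: a 4-byte ashmem buffer filled with {0, 1, 2, 3}, so that
// cache export and reload paths can be exercised without a real compiled model.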
int32_t MockIPreparedModel::ExportModelCache(std::vector<SharedBuffer>& modelCache)
{
    if (!modelCache.empty()) {
        LOGE("[NNRtTest] The parameter of ExportModelCache should be an empty vector.");
        return HDF_ERR_INVALID_PARAM;
    }
    uint8_t bufferData[4] = {0, 1, 2, 3};
    uint32_t size = sizeof(bufferData);
    SharedBuffer buffer;
    buffer.fd = AshmemCreate("cache", size);
    if (buffer.fd < 0) {
        LOGE("[Mock_Device]ExportModelCache failed, create ashmem failed.");
        return HDF_FAILURE;
    }
    buffer.bufferSize = AshmemGetSize(buffer.fd);
    buffer.offset = 0;
    buffer.dataSize = size;
    AshmemSetProt(buffer.fd, PROT_READ | PROT_WRITE);

    void* data = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, buffer.fd, 0);
    if (data == MAP_FAILED) {
        LOGE("[Mock_Device]ExportModelCache failed, map fd to address failed: %{public}s.", strerror(errno));
        return HDF_FAILURE;
    }

    auto memRet = memcpy_s(data, size, bufferData, size);
    auto unmapResult = munmap(data, size);
    if (unmapResult != 0) {
        LOGE("[Mock_Device]ExportModelCache failed, munmap failed.");
        return HDF_FAILURE;
    }
    if (memRet != EOK) {
        LOGE("[Mock_Device]ExportModelCache failed, memcpy_s of cache data failed.");
        return HDF_FAILURE;
    }
    m_fds.emplace(buffer.fd);
    modelCache.emplace_back(buffer);
    return HDF_SUCCESS;
}

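// The remaining prepared-model methods report fixed values: a 1.0 interface version, a constant
// 2x2x2x2 output shape, and hard-coded min/max input dimension ranges for dynamic-shape tests.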
int32_t MockIPreparedModel::GetVersion(uint32_t &majorVersion, uint32_t &minorVersion)
{
    majorVersion = 1;
    minorVersion = 0;
    return HDF_SUCCESS;
}

int32_t MockIPreparedModel::Run(const std::vector<IOTensor>& inputs, const std::vector<IOTensor>& outputs,
    std::vector<std::vector<int32_t>>& outputsDims)
{
    outputsDims = {{2, 2, 2, 2}};
    return HDF_SUCCESS;
}

int32_t MockIPreparedModel::GetInputDimRanges(std::vector<std::vector<uint32_t>>& minInputDims,
    std::vector<std::vector<uint32_t>>& maxInputDims)
{
    minInputDims = {{2, 2, 2, 2}, {2, 2, 2, 2}};
    maxInputDims = {{2, 100, 100, 10}, {2, 100, 100, 10}};

    return HDF_SUCCESS;
}

} // namespace V2_0
} // namespace Nnrt
} // namespace HDI
} // namespace OHOS