#include "IENetwork.h"
#include "ie_common.h"
#include <android-base/logging.h>
#include <android/log.h>
#include <ie_blob.h>
#include <log/log.h>
#undef LOG_TAG
#define LOG_TAG "IENetwork"
namespace android {
namespace hardware {
namespace neuralnetworks {
namespace nnhal {
bool IENetwork::loadNetwork() {
    ALOGD("%s", __func__);
#if __ANDROID__
    InferenceEngine::Core ie(std::string("/vendor/etc/openvino/plugins.xml"));
#else
    InferenceEngine::Core ie(std::string("/usr/local/lib64/plugins.xml"));
#endif
    if (mNetwork) {
        // Compile the network for the CPU plugin and create an inference request.
        mExecutableNw = ie.LoadNetwork(*mNetwork, "CPU");
        ALOGD("LoadNetwork is done");
        mInferRequest = mExecutableNw.CreateInferRequest();
        ALOGD("CreateInferRequest is done");
        // Cache the input/output descriptors for later blob handling.
        mInputInfo = mNetwork->getInputsInfo();
        mOutputInfo = mNetwork->getOutputsInfo();
    } else {
        ALOGE("Invalid network pointer");
        return false;
    }
    return true;
}
// Must be called before loadNetwork(). It is still unclear whether this needs
// to be called for every input when the network has multiple inputs/outputs;
// see the usage sketch at the end of this file.
void IENetwork::prepareInput(InferenceEngine::Precision precision, InferenceEngine::Layout layout) {
    ALOGD("%s", __func__);
    if (!mNetwork) {
        ALOGE("Invalid network pointer");
        return;
    }
    // This runs before loadNetwork(), so read the input info from the network
    // itself; mInputInfo is only populated once loadNetwork() has run.
    auto inputInfoItem = *mNetwork->getInputsInfo().begin();
    inputInfoItem.second->setPrecision(precision);
    inputInfoItem.second->setLayout(layout);
}
void IENetwork::prepareOutput(InferenceEngine::Precision precision,
                              InferenceEngine::Layout layout) {
    if (!mNetwork) {
        ALOGE("Invalid network pointer");
        return;
    }
    // Copy the DataPtr by value: getOutputsInfo() returns a temporary map.
    InferenceEngine::DataPtr output = mNetwork->getOutputsInfo().begin()->second;
    output->setPrecision(precision);
    output->setLayout(layout);
}
void IENetwork::setBlob(const std::string& inName, const InferenceEngine::Blob::Ptr& inputBlob) {
    ALOGI("setBlob: input/output blob name %s", inName.c_str());
    mInferRequest.SetBlob(inName, inputBlob);
}
InferenceEngine::TBlob<float>::Ptr IENetwork::getBlob(const std::string& outName) {
    // Fetch the named blob from the request and downcast it to TBlob<float>.
    InferenceEngine::Blob::Ptr outputBlob = mInferRequest.GetBlob(outName);
    return android::hardware::neuralnetworks::nnhal::As<InferenceEngine::TBlob<float>>(outputBlob);
}
void IENetwork::infer() {
    ALOGI("Infer Network");
    mInferRequest.StartAsync();
    // Wait up to 10 s; Wait() returns OK only if the request completed.
    InferenceEngine::StatusCode status = mInferRequest.Wait(10000);
    if (status != InferenceEngine::StatusCode::OK) {
        ALOGE("Infer request failed or timed out, status: %d", static_cast<int>(status));
        return;
    }
    ALOGI("infer request completed");
}
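
// A minimal usage sketch, kept as a comment since it is not part of this file.
// The call order follows the note above (prepare before load); the constructor
// signature and the tensor names "input"/"output" are assumptions made for
// illustration, not names defined by this class.
//
//   IENetwork net(network);  // hypothetical: wrap an already-parsed network
//   net.prepareInput(InferenceEngine::Precision::FP32, InferenceEngine::Layout::NHWC);
//   net.prepareOutput(InferenceEngine::Precision::FP32, InferenceEngine::Layout::NC);
//   if (net.loadNetwork()) {
//       net.setBlob("input", inputBlob);  // inputBlob: a pre-filled Blob::Ptr
//       net.infer();
//       InferenceEngine::TBlob<float>::Ptr out = net.getBlob("output");
//   }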
} // namespace nnhal
} // namespace neuralnetworks
} // namespace hardware
} // namespace android