Update nn-samples with dependency APIs.

Xusong Wang
2020-03-17 15:09:04 -07:00
parent 3d57ed5207
commit 5d4dd8bcf0
5 changed files with 45 additions and 16 deletions

View File

@@ -3,7 +3,7 @@ apply plugin: 'com.android.application'
android {
compileSdkVersion 'android-R'
buildToolsVersion '30.0.0 rc1'
-ndkVersion '21.1.6210238'
+ndkVersion '21.1.6273396'
defaultConfig {
applicationId "com.example.android.basic"

View File

@@ -6,7 +6,7 @@ buildscript {
jcenter()
}
dependencies {
-classpath 'com.android.tools.build:gradle:4.1.0-alpha01'
+classpath 'com.android.tools.build:gradle:4.1.0-alpha02'
// NOTE: Do not place your application dependencies here; they belong
// in the individual module build.gradle files

View File

@@ -1,6 +1,6 @@
-#Wed Mar 04 14:46:09 PST 2020
+#Tue Mar 17 14:03:57 PDT 2020
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists
-distributionUrl=https\://services.gradle.org/distributions/gradle-6.2-bin.zip
+distributionUrl=https\://services.gradle.org/distributions/gradle-6.2.1-bin.zip

View File

@@ -3,7 +3,7 @@ apply plugin: 'com.android.application'
android {
compileSdkVersion 'android-R'
buildToolsVersion '30.0.0 rc1'
-ndkVersion '21.1.6210238'
+ndkVersion '21.1.6273396'
defaultConfig {
applicationId "com.example.android.sequence"

View File

@@ -557,9 +557,9 @@ bool SimpleSequenceModel::CreateOpaqueMemories() {
}
/**
-* Compute a single step of accumulating the geometric progression.
+* Dispatch a single computation step of accumulating the geometric progression.
*/
-static bool ComputeSingleStep(ANeuralNetworksCompilation* compilation,
+static bool DispatchSingleStep(ANeuralNetworksCompilation* compilation,
ANeuralNetworksMemory* sumIn,
uint32_t sumInLength,
ANeuralNetworksMemory* stateIn,
@@ -567,7 +567,9 @@ static bool ComputeSingleStep(ANeuralNetworksCompilation* compilation,
ANeuralNetworksMemory* sumOut,
uint32_t sumOutLength,
ANeuralNetworksMemory* stateOut,
-uint32_t stateOutLength) {
+uint32_t stateOutLength,
+const ANeuralNetworksEvent* waitFor,
+ANeuralNetworksEvent** event) {
// Create an ANeuralNetworksExecution object from the compiled model.
ANeuralNetworksExecution* execution;
int32_t status = ANeuralNetworksExecution_create(compilation, &execution);
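Because the signature change is split across the two hunks above, here is the new DispatchSingleStep declaration assembled in one place. This is an editor's reconstruction, not part of the commit: the indentation is assumed, the stateInLength parameter falls outside the shown hunks and is inferred from the naming pattern, and the trailing comments are mine.

static bool DispatchSingleStep(ANeuralNetworksCompilation* compilation,
                               ANeuralNetworksMemory* sumIn,
                               uint32_t sumInLength,
                               ANeuralNetworksMemory* stateIn,
                               uint32_t stateInLength,               // assumed: falls between the shown hunks
                               ANeuralNetworksMemory* sumOut,
                               uint32_t sumOutLength,
                               ANeuralNetworksMemory* stateOut,
                               uint32_t stateOutLength,
                               const ANeuralNetworksEvent* waitFor,  // new: event this step waits on, or nullptr
                               ANeuralNetworksEvent** event);        // new: signals completion of this step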
@@ -642,9 +644,20 @@ static bool ComputeSingleStep(ANeuralNetworksCompilation* compilation,
return false;
}
-// Compute the execution of the model.
-// Note that the execution here is synchronous.
-status = ANeuralNetworksExecution_compute(execution);
+// Dispatch the execution of the model.
+// Note that the execution here is asynchronous with dependencies.
+constexpr uint64_t kTimeOutDurationInNs = 100'000'000; // 100 ms
+const ANeuralNetworksEvent* const* dependencies = nullptr;
+uint32_t numDependencies = 0;
+if (waitFor != nullptr) {
+dependencies = &waitFor;
+numDependencies = 1;
+}
+status = ANeuralNetworksExecution_startComputeWithDependencies(execution,
+dependencies,
+numDependencies,
+kTimeOutDurationInNs,
+event);
if (status != ANEURALNETWORKS_NO_ERROR) {
__android_log_print(ANDROID_LOG_ERROR, LOG_TAG,
"ANeuralNetworksExecution_compute failed");
@@ -679,11 +692,14 @@ bool SimpleSequenceModel::Compute(float initialValue,
fillMemory(sumInFd_, tensorSize_, 0);
fillMemory(initialStateFd_, tensorSize_, initialValue);
+// The event objects for all computation steps.
+std::vector<ANeuralNetworksEvent*> events(steps, nullptr);
for (uint32_t i = 0; i < steps; i++) {
// We will only use ASharedMemory for boundary step executions, and use
// opaque memories for intermediate results to minimize the data copying.
// Note that when setting an opaque memory as the input or output of an
// execution, the offset and length must be set to 0 to indicate the
// entire memory region is used.
ANeuralNetworksMemory* sumInMemory;
ANeuralNetworksMemory* sumOutMemory;
@@ -711,7 +727,10 @@ bool SimpleSequenceModel::Compute(float initialValue,
stateOutMemory = memoryOpaqueStateOut_;
stateOutLength = 0;
-if (!ComputeSingleStep(compilation_,
+// Dispatch a single computation step with a dependency on the previous step, if any.
+// The actual computation will start once its dependency has finished.
+const ANeuralNetworksEvent* waitFor = i == 0 ? nullptr : events[i - 1];
+if (!DispatchSingleStep(compilation_,
sumInMemory,
sumInLength,
stateInMemory,
@@ -719,10 +738,12 @@ bool SimpleSequenceModel::Compute(float initialValue,
sumOutMemory,
sumOutLength,
stateOutMemory,
-stateOutLength)) {
+stateOutLength,
+waitFor,
+&events[i])) {
__android_log_print(ANDROID_LOG_ERROR,
LOG_TAG,
"ComputeSingleStep failed for step %d",
"DispatchSingleStep failed for step %d",
i);
return false;
}
@@ -733,6 +754,9 @@ bool SimpleSequenceModel::Compute(float initialValue,
std::swap(memoryOpaqueStateIn_, memoryOpaqueStateOut_);
}
+// Since the events are chained, we only need to wait for the last one.
+ANeuralNetworksEvent_wait(events.back());
// Get the results.
float* outputTensorPtr = reinterpret_cast<float*>(
mmap(nullptr,
@@ -743,6 +767,11 @@ bool SimpleSequenceModel::Compute(float initialValue,
0));
*result = outputTensorPtr[0];
munmap(outputTensorPtr, tensorSize_ * sizeof(float));
+// Cleanup event objects.
+for (auto* event : events) {
+ANeuralNetworksEvent_free(event);
+}
return true;
}
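To summarize the event lifecycle that SimpleSequenceModel::Compute now follows, the sketch below chains a vector of already-configured executions. It is an illustration under that assumption, not the sample's code: each step depends on the previous step's event, only the last event needs an explicit wait, and every event is freed at the end.

#include <android/NeuralNetworks.h>

#include <vector>

// Chain executions[i] behind executions[i - 1] using the dependency API,
// wait for the last step, then release every event. Assumes each execution
// already has its inputs and outputs set.
static bool RunChained(const std::vector<ANeuralNetworksExecution*>& executions) {
  constexpr uint64_t kTimeoutNs = 100'000'000;  // 100 ms per step
  std::vector<ANeuralNetworksEvent*> events(executions.size(), nullptr);
  bool ok = true;
  for (size_t i = 0; ok && i < executions.size(); ++i) {
    // Step i waits on step i - 1; the first step has no dependency.
    const ANeuralNetworksEvent* waitFor = i == 0 ? nullptr : events[i - 1];
    const ANeuralNetworksEvent* const* deps = waitFor != nullptr ? &waitFor : nullptr;
    const uint32_t numDeps = waitFor != nullptr ? 1 : 0;
    ok = ANeuralNetworksExecution_startComputeWithDependencies(
             executions[i], deps, numDeps, kTimeoutNs, &events[i]) ==
         ANEURALNETWORKS_NO_ERROR;
  }
  // The events are chained, so waiting on the last one waits for the whole sequence.
  if (ok && !events.empty()) {
    ok = ANeuralNetworksEvent_wait(events.back()) == ANEURALNETWORKS_NO_ERROR;
  }
  // Every event must be freed, whether or not the chain completed.
  for (ANeuralNetworksEvent* event : events) {
    if (event != nullptr) {
      ANeuralNetworksEvent_free(event);
    }
  }
  return ok;
}

As the sample's comments note, keeping the intermediate results in opaque memories is what makes this chaining worthwhile: only the boundary steps use ASharedMemory, so intermediate results do not have to be copied back to the application between executions.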