Skip to content

Commit

Permalink
IVGCVSW-7404 Out of bounds detection
Browse files Browse the repository at this point in the history
 * Added test to ensure that all inputs and outputs do not go out of
   bounds.

Signed-off-by: Mike Kelly <[email protected]>
Change-Id: Ia97e85f71e46cd2203306243e4dcbc23e0f29ec1
  • Loading branch information
MikeJKelly authored and KevinARM committed Mar 8, 2023
1 parent 0637bf3 commit 084cb4d
Show file tree
Hide file tree
Showing 5 changed files with 136 additions and 12 deletions.
24 changes: 21 additions & 3 deletions ArmnnPreparedModel.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

Expand Down Expand Up @@ -218,18 +218,27 @@ Return<V1_0::ErrorStatus> ArmnnPreparedModel<HalVersion>::execute(
NotifyCallbackAndCheck(callback, V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute");
return V1_0::ErrorStatus::GENERAL_FAILURE;
}

// add the inputs and outputs with their data
try
{
pInputTensors->reserve(request.inputs.size());
for (unsigned int i = 0; i < request.inputs.size(); i++)
{
const auto& inputArg = request.inputs[i];

armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
// pInputTensors (of type InputTensors) is composed of a vector of ConstTensors.
// Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
inputTensorInfo.SetConstant();
auto result = ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(request,
inputTensorInfo,
inputArg,
"input");
if (result != V1_0::ErrorStatus::NONE)
{
return result;
}

const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, *pMemPools);
if (inputTensor.GetMemoryArea() == nullptr)
{
Expand All @@ -244,8 +253,17 @@ Return<V1_0::ErrorStatus> ArmnnPreparedModel<HalVersion>::execute(
for (unsigned int i = 0; i < request.outputs.size(); i++)
{
const auto& outputArg = request.outputs[i];

const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
auto result = ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(request,
outputTensorInfo,
outputArg,
"output");

if (result != V1_0::ErrorStatus::NONE)
{
return result;
}

const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, *pMemPools);
if (outputTensor.GetMemoryArea() == nullptr)
{
Expand Down
24 changes: 21 additions & 3 deletions ArmnnPreparedModel_1_2.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

Expand Down Expand Up @@ -312,11 +312,20 @@ Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::PrepareMemoryForIn
for (unsigned int i = 0; i < request.inputs.size(); i++)
{
const auto& inputArg = request.inputs[i];

armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
// inputs (of type InputTensors) is composed of a vector of ConstTensors.
// Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
inputTensorInfo.SetConstant();
auto result = ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(request,
inputTensorInfo,
inputArg,
"input");

if (result != V1_0::ErrorStatus::NONE)
{
return result;
}

const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);

if (inputTensor.GetMemoryArea() == nullptr)
Expand All @@ -342,8 +351,17 @@ Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::PrepareMemoryForOu
for (unsigned int i = 0; i < request.outputs.size(); i++)
{
const auto& outputArg = request.outputs[i];
armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
auto result = ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(request,
outputTensorInfo,
outputArg,
"output");

if (result != V1_0::ErrorStatus::NONE)
{
return result;
}

const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);
if (outputTensor.GetMemoryArea() == nullptr)
{
Expand Down
26 changes: 22 additions & 4 deletions ArmnnPreparedModel_1_3.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
// Note: the ArmnnFencedExecutionCallback and code snippet in the executeFenced() function
Expand Down Expand Up @@ -510,11 +510,20 @@ Return<V1_3::ErrorStatus> ArmnnPreparedModel_1_3<HalVersion>::PrepareMemoryForIn
for (unsigned int i = 0; i < request.inputs.size(); i++)
{
const auto& inputArg = request.inputs[i];

armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
// inputs (of type InputTensors) is composed of a vector of ConstTensors.
// Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
inputTensorInfo.SetConstant();
auto result = ValidateRequestArgument<V1_3::ErrorStatus, V1_3::Request>(request,
inputTensorInfo,
inputArg,
"input");

if (result != V1_3::ErrorStatus::NONE)
{
return result;
}

const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);

if (inputTensor.GetMemoryArea() == nullptr)
Expand All @@ -540,15 +549,24 @@ Return<V1_3::ErrorStatus> ArmnnPreparedModel_1_3<HalVersion>::PrepareMemoryForOu
for (unsigned int i = 0; i < request.outputs.size(); i++)
{
const auto& outputArg = request.outputs[i];

armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
auto result = ValidateRequestArgument<V1_3::ErrorStatus, V1_3::Request>(request,
outputTensorInfo,
outputArg,
"output");

if (result != V1_3::ErrorStatus::NONE)
{
return result;
}

const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);

if (outputTensor.GetMemoryArea() == nullptr)
{
ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
return V1_3::ErrorStatus::GENERAL_FAILURE;
}

const size_t outputSize = outputTensorInfo.GetNumBytes();

unsigned int count = 0;
Expand Down
65 changes: 64 additions & 1 deletion Utils.cpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// Copyright © 2017-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

Expand Down Expand Up @@ -767,4 +767,67 @@ void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
#endif
}
}

/// Returns the size in bytes of the memory pool referenced by the
/// argument's pool index (V1_0 requests: pools are plain hidl_memory).
size_t GetSize(const V1_0::Request& request, const V1_0::RequestArgument& requestArgument)
{
    const auto& pool = request.pools[requestArgument.location.poolIndex];
    return pool.size();
}

#ifdef ARMNN_ANDROID_NN_V1_3
/// Returns the size in bytes of the pool referenced by the argument's pool
/// index for V1_3 requests. V1_3 pools are a union (MemoryPool); only pools
/// backed by hidlMemory have a queryable size — anything else yields 0.
size_t GetSize(const V1_3::Request& request, const V1_0::RequestArgument& requestArgument)
{
    const auto& pool = request.pools[requestArgument.location.poolIndex];
    if (pool.getDiscriminator() != V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory)
    {
        return 0;
    }
    return pool.hidlMemory().size();
}
#endif

/// Validates that a request argument stays within the bounds of its memory pool.
///
/// Checks that the argument's pool index refers to an existing pool and that
/// the tensor's byte size, placed at the argument's offset, fits entirely
/// inside that pool.
///
/// @param request         The inference request holding the memory pools.
/// @param tensorInfo      Describes the tensor; GetNumBytes() gives the required length.
/// @param requestArgument The argument whose poolIndex/offset are validated.
/// @param descString      "input" or "output"; used only in error messages.
/// @return ErrorStatus::NONE on success, ErrorStatus::GENERAL_FAILURE when out of bounds.
template <typename ErrorStatus, typename Request>
ErrorStatus ValidateRequestArgument(const Request& request,
                                    const armnn::TensorInfo& tensorInfo,
                                    const V1_0::RequestArgument& requestArgument,
                                    std::string descString)
{
    if (requestArgument.location.poolIndex >= request.pools.size())
    {
        std::string err = fmt::format("Invalid {} pool at index {} the pool index is greater than the number "
                                      "of available pools {}",
                                      descString, requestArgument.location.poolIndex, request.pools.size());
        // Always log through an explicit "%s": passing a runtime-built message
        // directly as the format string is a format-string bug (undefined
        // behavior if the message ever contains '%').
        ALOGE("%s", err.c_str());
        return ErrorStatus::GENERAL_FAILURE;
    }
    const size_t size = GetSize(request, requestArgument);
    const size_t totalLength = tensorInfo.GetNumBytes();

    // Cast offset to size_t before adding so the comparison is done in the
    // wider type rather than overflowing the 32-bit offset.
    if (static_cast<size_t>(requestArgument.location.offset) + totalLength > size)
    {
        std::string err = fmt::format("Invalid {} pool at index {} the offset {} and length {} are greater "
                                      "than the pool size {}", descString, requestArgument.location.poolIndex,
                                      requestArgument.location.offset, totalLength, size);
        ALOGE("%s", err.c_str());
        return ErrorStatus::GENERAL_FAILURE;
    }
    return ErrorStatus::NONE;
}

// Explicit instantiation for the V1_0 request/error types
// (used by ArmnnPreparedModel and ArmnnPreparedModel_1_2).
template V1_0::ErrorStatus ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(
const V1_0::Request& request,
const armnn::TensorInfo& tensorInfo,
const V1_0::RequestArgument& requestArgument,
std::string descString);

#ifdef ARMNN_ANDROID_NN_V1_3
// Explicit instantiation for the V1_3 request/error types
// (used by ArmnnPreparedModel_1_3).
template V1_3::ErrorStatus ValidateRequestArgument<V1_3::ErrorStatus, V1_3::Request>(
const V1_3::Request& request,
const armnn::TensorInfo& tensorInfo,
const V1_0::RequestArgument& requestArgument,
std::string descString);
#endif

} // namespace armnn_driver
9 changes: 8 additions & 1 deletion Utils.hpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// Copyright © 2017-2021,2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

Expand All @@ -11,6 +11,8 @@
#include <NeuralNetworks.h>
#include <Utils.h>

#include <fmt/format.h>

#include <vector>
#include <string>
#include <fstream>
Expand Down Expand Up @@ -194,4 +196,9 @@ inline V1_2::OutputShape ComputeShape(const armnn::TensorInfo& info)

void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools);

/// Validates that requestArgument's pool index is within request.pools and
/// that the tensor described by tensorInfo (GetNumBytes()), placed at the
/// argument's offset, fits inside the referenced pool.
/// descString ("input"/"output") is used only for error messages.
/// Returns ErrorStatus::NONE on success, GENERAL_FAILURE otherwise.
template <typename ErrorStatus, typename Request>
ErrorStatus ValidateRequestArgument(const Request& request,
const armnn::TensorInfo& tensorInfo,
const V1_0::RequestArgument& requestArgument,
std::string descString);
} // namespace armnn_driver

0 comments on commit 084cb4d

Please sign in to comment.