[PATCH] D85250: [llvm] Expose type and element count-related APIs on TensorSpec
Mircea Trofin via Phabricator via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 4 17:32:33 PDT 2020
This revision was landed with ongoing or failed builds.
This revision was automatically updated to reflect the committed changes.
Closed by commit rG90b9c49ca647: [llvm] Expose type and element count-related APIs on TensorSpec (authored by mtrofin).
Repository:
rG LLVM Github Monorepo
CHANGES SINCE LAST ACTION
https://reviews.llvm.org/D85250/new/
https://reviews.llvm.org/D85250
Files:
llvm/include/llvm/Analysis/Utils/TFUtils.h
llvm/lib/Analysis/TFUtils.cpp
llvm/unittests/Analysis/TFUtilsTest.cpp
Index: llvm/unittests/Analysis/TFUtilsTest.cpp
===================================================================
--- llvm/unittests/Analysis/TFUtilsTest.cpp
+++ llvm/unittests/Analysis/TFUtilsTest.cpp
@@ -123,3 +123,18 @@
auto Spec = getTensorSpecFromJSON(Ctx, *Value);
EXPECT_FALSE(Spec.hasValue());
}
+
+TEST(TFUtilsTest, TensorSpecSizesAndTypes) {
+ auto Spec1D = TensorSpec::createSpec<int16_t>("Hi1", {1});
+ auto Spec2D = TensorSpec::createSpec<int16_t>("Hi2", {1, 1});
+ auto Spec1DLarge = TensorSpec::createSpec<float>("Hi3", {10});
+ auto Spec3DLarge = TensorSpec::createSpec<float>("Hi3", {2, 4, 10});
+ EXPECT_TRUE(Spec1D.isElementType<int16_t>());
+ EXPECT_FALSE(Spec3DLarge.isElementType<double>());
+ EXPECT_EQ(Spec1D.getElementCount(), 1);
+ EXPECT_EQ(Spec2D.getElementCount(), 1);
+ EXPECT_EQ(Spec1DLarge.getElementCount(), 10);
+ EXPECT_EQ(Spec3DLarge.getElementCount(), 80);
+ EXPECT_EQ(Spec3DLarge.getElementByteSize(), sizeof(float));
+ EXPECT_EQ(Spec1D.getElementByteSize(), sizeof(int16_t));
+}
\ No newline at end of file
Index: llvm/lib/Analysis/TFUtils.cpp
===================================================================
--- llvm/lib/Analysis/TFUtils.cpp
+++ llvm/lib/Analysis/TFUtils.cpp
@@ -24,6 +24,7 @@
#include "tensorflow/c/c_api_experimental.h"
#include <cassert>
+#include <numeric>
using namespace llvm;
@@ -84,6 +85,16 @@
std::vector<TF_Tensor *> Output;
};
+size_t TensorSpec::getElementByteSize() const {
+ return TF_DataTypeSize(static_cast<TF_DataType>(TypeIndex));
+}
+
+TensorSpec::TensorSpec(const std::string &Name, int Port, int TypeIndex,
+ const std::vector<int64_t> &Shape)
+ : Name(Name), Port(Port), TypeIndex(TypeIndex), Shape(Shape),
+ ElementCount(std::accumulate(Shape.begin(), Shape.end(), 1,
+ std::multiplies<int64_t>())) {}
+
Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
const json::Value &Value) {
auto EmitError = [&](const llvm::Twine &Message) -> Optional<TensorSpec> {
Index: llvm/include/llvm/Analysis/Utils/TFUtils.h
===================================================================
--- llvm/include/llvm/Analysis/Utils/TFUtils.h
+++ llvm/include/llvm/Analysis/Utils/TFUtils.h
@@ -66,10 +66,18 @@
bool operator!=(const TensorSpec &Other) const { return !(*this == Other); }
+ /// Get the number of elements in a tensor with this shape.
+ size_t getElementCount() const { return ElementCount; }
+ /// Get the size, in bytes, of one element.
+ size_t getElementByteSize() const;
+
+ template <typename T> bool isElementType() const {
+ return getDataType<T>() == TypeIndex;
+ }
+
private:
TensorSpec(const std::string &Name, int Port, int TypeIndex,
- const std::vector<int64_t> &Shape)
- : Name(Name), Port(Port), TypeIndex(TypeIndex), Shape(Shape) {}
+ const std::vector<int64_t> &Shape);
template <typename T> static int getDataType() {
llvm_unreachable("Undefined tensor type");
@@ -79,6 +87,7 @@
int Port = 0;
int TypeIndex = 0;
std::vector<int64_t> Shape;
+ size_t ElementCount = 0;
};
Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
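
As an aside for readers of this patch: the unit test above already exercises the new accessors, and the sketch below is only a minimal illustration of how a caller might combine them to size a raw tensor buffer. It is not part of D85250; the helper name allocateBuffer and the tensor name "input" are made up, while createSpec, isElementType, getElementCount and getElementByteSize are the APIs introduced in the diff above.

// Illustrative sketch only -- not part of the patch.
#include "llvm/Analysis/Utils/TFUtils.h"
#include <cassert>
#include <vector>

using namespace llvm;

std::vector<char> allocateBuffer() {
  // Same 2x4x10 float shape as Spec3DLarge in the unit test above.
  auto Spec = TensorSpec::createSpec<float>("input", {2, 4, 10});
  assert(Spec.isElementType<float>() && "unexpected element type");
  // 80 elements * sizeof(float) bytes = 320 bytes.
  return std::vector<char>(Spec.getElementCount() *
                           Spec.getElementByteSize());
}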