[llvm] 8fc9eea - Test that volatile load type isn't changed

JF Bastien via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 9 11:22:25 PDT 2020


Author: JF Bastien
Date: 2020-03-09T11:19:23-07:00
New Revision: 8fc9eea43a94d83e46614eee47b069e25848fedb

URL: https://github.com/llvm/llvm-project/commit/8fc9eea43a94d83e46614eee47b069e25848fedb
DIFF: https://github.com/llvm/llvm-project/commit/8fc9eea43a94d83e46614eee47b069e25848fedb.diff

LOG: Test that volatile load type isn't changed

Summary: As discussed in D75505, it's not particularly useful to change a load's type between floating-point and integer merely because the load is followed by a bitcast, and doing so might lead to surprising code generation. Check that this doesn't generally happen.
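
For context, a minimal sketch of the pattern being pinned down (the function names below are illustrative and not part of the patch): for a non-volatile load feeding a bitcast, InstCombine may rewrite the load to the cast-to type, as discussed in D75505, whereas the volatile form is expected to stay exactly as written.

    ; Non-volatile: InstCombine may change the load itself to `load float`
    ; (loading through a bitcast pointer), eliding the scalar bitcast.
    define float @nonvolatile_sketch(i32* %addr) {
      %i32 = load i32, i32* %addr, align 4
      %float = bitcast i32 %i32 to float
      ret float %float
    }

    ; Volatile: the load should keep its i32 type; only the bitcast converts
    ; the value, so the emitted memory access remains an integer load.
    define float @volatile_sketch(i32* %addr) {
      %i32 = load volatile i32, i32* %addr, align 4
      %float = bitcast i32 %i32 to float
      ret float %float
    }

This matters mostly for codegen expectations: volatile accesses often target memory-mapped or otherwise special memory, where an FP load instruction in place of an integer one could be a surprise.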

Reviewers: lebedev.ri

Subscribers: jkorous, dexonsmith, ributzka, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D75644

Added: 
    llvm/test/Transforms/InstCombine/volatile_load_cast.ll

Modified: 
    llvm/docs/LangRef.rst

Removed: 
    


################################################################################
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 5ad97fb87267..480a0fc4c07a 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -2456,10 +2456,11 @@ The compiler may assume execution will continue after a volatile operation,
 so operations which modify memory or may have undefined behavior can be
 hoisted past a volatile operation.
 
-IR-level volatile loads and stores cannot safely be optimized into
-llvm.memcpy or llvm.memmove intrinsics even when those intrinsics are
-flagged volatile. Likewise, the backend should never split or merge
-target-legal volatile load/store instructions.
+IR-level volatile loads and stores cannot safely be optimized into llvm.memcpy
+or llvm.memmove intrinsics even when those intrinsics are flagged volatile.
+Likewise, the backend should never split or merge target-legal volatile
+load/store instructions. Similarly, IR-level volatile loads and stores cannot
+change from integer to floating-point or vice versa.
 
 .. admonition:: Rationale
 

diff --git a/llvm/test/Transforms/InstCombine/volatile_load_cast.ll b/llvm/test/Transforms/InstCombine/volatile_load_cast.ll
new file mode 100644
index 000000000000..7eb0a5427c92
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/volatile_load_cast.ll
@@ -0,0 +1,59 @@
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+; Ensure that volatile loads followed by a bitcast don't get transformed into a
+; volatile load of the bitcast-target type. This is unlikely to provide much in
+; terms of optimizations, and might break the programmer's expectation for code
+; generation, however brittle that expectation might be.
+;
+; See llvm.org/D75644 and llvm.org/D75505
+target datalayout = "e-p:64:64-i32:32:32-i64:64:64-f32:32:32-f64:64:64"
+
+define float @float_load(i32* %addr) {
+; CHECK-LABEL: @float_load(
+; CHECK:         %i32 = load volatile i32, i32* %addr, align 4
+; CHECK-NEXT:    %float = bitcast i32 %i32 to float
+; CHECK-NEXT:    ret float %float
+  %i32 = load volatile i32, i32* %addr, align 4
+  %float = bitcast i32 %i32 to float
+  ret float %float
+}
+
+define i32 @i32_load(float* %addr) {
+; CHECK-LABEL: @i32_load(
+; CHECK:         %float = load volatile float, float* %addr, align 4
+; CHECK-NEXT:    %i32 = bitcast float %float to i32
+; CHECK-NEXT:    ret i32 %i32
+  %float = load volatile float, float* %addr, align 4
+  %i32 = bitcast float %float to i32
+  ret i32 %i32
+}
+
+define double @double_load(i64* %addr) {
+; CHECK-LABEL: @double_load(
+; CHECK:         %i64 = load volatile i64, i64* %addr, align 8
+; CHECK-NEXT:    %double = bitcast i64 %i64 to double
+; CHECK-NEXT:    ret double %double
+  %i64 = load volatile i64, i64* %addr, align 8
+  %double = bitcast i64 %i64 to double
+  ret double %double
+}
+
+define i64 @i64_load(double* %addr) {
+; CHECK-LABEL: @i64_load(
+; CHECK:         %double = load volatile double, double* %addr, align 8
+; CHECK-NEXT:    %i64 = bitcast double %double to i64
+; CHECK-NEXT:    ret i64 %i64
+  %double = load volatile double, double* %addr, align 8
+  %i64 = bitcast double %double to i64
+  ret i64 %i64
+}
+
+define i8* @ptr_load(i64* %addr) {
+; CHECK-LABEL: @ptr_load(
+; CHECK:         %i64 = load volatile i64, i64* %addr, align 8
+; CHECK-NEXT:    %ptr = inttoptr i64 %i64 to i8*
+; CHECK-NEXT:    ret i8* %ptr
+  %i64 = load volatile i64, i64* %addr, align 8
+  %ptr = inttoptr i64 %i64 to i8*
+  ret i8* %ptr
+}


More information about the llvm-commits mailing list