[llvm] 961dd1a - [RISCV] Add scalar type test coverage for ISD::AVG nodes
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sat Jun 15 08:16:05 PDT 2024
Author: Simon Pilgrim
Date: 2024-06-15T16:15:50+01:00
New Revision: 961dd1ae5ea216024af3aa7d43a57c6b45a023ec
URL: https://github.com/llvm/llvm-project/commit/961dd1ae5ea216024af3aa7d43a57c6b45a023ec
DIFF: https://github.com/llvm/llvm-project/commit/961dd1ae5ea216024af3aa7d43a57c6b45a023ec.diff
LOG: [RISCV] Add scalar type test coverage for ISD::AVG nodes
Forked from the equivalent X86 tests.
Added:
llvm/test/CodeGen/RISCV/avgceils.ll
llvm/test/CodeGen/RISCV/avgceilu.ll
llvm/test/CodeGen/RISCV/avgfloors.ll
llvm/test/CodeGen/RISCV/avgflooru.ll
Modified:
Removed:
################################################################################
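For context (illustrative only, not part of the commit): the "fixed" patterns listed in the header comment of each new test file are the usual carry-save rewrites of the rounding averages, which avoid widening the type. A minimal C sketch of the four identities, using 8-bit elements; the signed variants assume that `>>` on a negative value is an arithmetic shift (true for GCC/Clang on the targets of interest):

    #include <stdint.h>

    /* x + y == 2*(x & y) + (x ^ y), so adding half of the xor term to the
       shared bits gives the floored average without needing a wider type. */
    static inline uint8_t avgflooru8(uint8_t x, uint8_t y) {
      return (uint8_t)((x & y) + ((x ^ y) >> 1));
    }
    static inline int8_t avgfloors8(int8_t x, int8_t y) {
      return (int8_t)((x & y) + ((x ^ y) >> 1)); /* >> assumed arithmetic */
    }

    /* x + y == 2*(x | y) - (x ^ y), so subtracting half of the xor term
       from the OR gives the average rounded up instead of down. */
    static inline uint8_t avgceilu8(uint8_t x, uint8_t y) {
      return (uint8_t)((x | y) - ((x ^ y) >> 1));
    }
    static inline int8_t avgceils8(int8_t x, int8_t y) {
      return (int8_t)((x | y) - ((x ^ y) >> 1)); /* >> assumed arithmetic */
    }

The "ext" variants in the tests compute the same values the obvious way: extend, add (plus one for the ceiling forms), shift right by one, truncate.
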
diff --git a/llvm/test/CodeGen/RISCV/avgceils.ll b/llvm/test/CodeGen/RISCV/avgceils.ll
new file mode 100644
index 0000000000000..2ff4ad3b3b462
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/avgceils.ll
@@ -0,0 +1,243 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck -check-prefix=RV64I %s
+
+;
+; fixed avg(x,y) = sub(or(x,y),ashr(xor(x,y),1))
+;
+; ext avg(x,y) = trunc(ashr(add(sext(x),sext(y),1),1))
+;
+
+define i8 @test_fixed_i8(i8 %a0, i8 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: srai a1, a1, 24
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: srai a0, a0, 24
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: addi a0, a0, 1
+; RV32I-NEXT: srai a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 56
+; RV64I-NEXT: srai a1, a1, 56
+; RV64I-NEXT: slli a0, a0, 56
+; RV64I-NEXT: srai a0, a0, 56
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a0, a0, 1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: ret
+ %or = or i8 %a0, %a1
+ %xor = xor i8 %a0, %a1
+ %shift = ashr i8 %xor, 1
+ %res = sub i8 %or, %shift
+ ret i8 %res
+}
+
+define i8 @test_ext_i8(i8 %a0, i8 %a1) nounwind {
+; RV32I-LABEL: test_ext_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: srai a1, a1, 24
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: srai a0, a0, 24
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: addi a0, a0, 1
+; RV32I-NEXT: srai a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 56
+; RV64I-NEXT: srai a1, a1, 56
+; RV64I-NEXT: slli a0, a0, 56
+; RV64I-NEXT: srai a0, a0, 56
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a0, a0, 1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: ret
+ %x0 = sext i8 %a0 to i16
+ %x1 = sext i8 %a1 to i16
+ %sum = add i16 %x0, %x1
+ %sum1 = add i16 %sum, 1
+ %shift = ashr i16 %sum1, 1
+ %res = trunc i16 %shift to i8
+ ret i8 %res
+}
+
+define i16 @test_fixed_i16(i16 %a0, i16 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i16:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 16
+; RV32I-NEXT: srai a1, a1, 16
+; RV32I-NEXT: slli a0, a0, 16
+; RV32I-NEXT: srai a0, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: addi a0, a0, 1
+; RV32I-NEXT: srai a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 48
+; RV64I-NEXT: srai a1, a1, 48
+; RV64I-NEXT: slli a0, a0, 48
+; RV64I-NEXT: srai a0, a0, 48
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a0, a0, 1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: ret
+ %or = or i16 %a0, %a1
+ %xor = xor i16 %a0, %a1
+ %shift = ashr i16 %xor, 1
+ %res = sub i16 %or, %shift
+ ret i16 %res
+}
+
+define i16 @test_ext_i16(i16 %a0, i16 %a1) nounwind {
+; RV32I-LABEL: test_ext_i16:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 16
+; RV32I-NEXT: srai a1, a1, 16
+; RV32I-NEXT: slli a0, a0, 16
+; RV32I-NEXT: srai a0, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: addi a0, a0, 1
+; RV32I-NEXT: srai a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 48
+; RV64I-NEXT: srai a1, a1, 48
+; RV64I-NEXT: slli a0, a0, 48
+; RV64I-NEXT: srai a0, a0, 48
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a0, a0, 1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: ret
+ %x0 = sext i16 %a0 to i32
+ %x1 = sext i16 %a1 to i32
+ %sum = add i32 %x0, %x1
+ %sum1 = add i32 %sum, 1
+ %shift = ashr i32 %sum1, 1
+ %res = trunc i32 %shift to i16
+ ret i16 %res
+}
+
+define i32 @test_fixed_i32(i32 %a0, i32 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a2, a0, a1
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: srai a0, a0, 1
+; RV32I-NEXT: sub a0, a2, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: sext.w a1, a1
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a0, a0, 1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: ret
+ %or = or i32 %a0, %a1
+ %xor = xor i32 %a1, %a0
+ %shift = ashr i32 %xor, 1
+ %res = sub i32 %or, %shift
+ ret i32 %res
+}
+
+define i32 @test_ext_i32(i32 %a0, i32 %a1) nounwind {
+; RV32I-LABEL: test_ext_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a2, a0, a1
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: srai a0, a0, 1
+; RV32I-NEXT: sub a0, a2, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: sext.w a1, a1
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a0, a0, 1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: ret
+ %x0 = sext i32 %a0 to i64
+ %x1 = sext i32 %a1 to i64
+ %sum = add i64 %x0, %x1
+ %sum1 = add i64 %sum, 1
+ %shift = ashr i64 %sum1, 1
+ %res = trunc i64 %shift to i32
+ ret i32 %res
+}
+
+define i64 @test_fixed_i64(i64 %a0, i64 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a4, a1, a3
+; RV32I-NEXT: xor a1, a1, a3
+; RV32I-NEXT: srai a3, a1, 1
+; RV32I-NEXT: sub a4, a4, a3
+; RV32I-NEXT: slli a1, a1, 31
+; RV32I-NEXT: xor a3, a0, a2
+; RV32I-NEXT: srli a3, a3, 1
+; RV32I-NEXT: or a3, a3, a1
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: sltu a1, a0, a3
+; RV32I-NEXT: sub a1, a4, a1
+; RV32I-NEXT: sub a0, a0, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: or a2, a0, a1
+; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: sub a0, a2, a0
+; RV64I-NEXT: ret
+ %or = or i64 %a0, %a1
+ %xor = xor i64 %a1, %a0
+ %shift = ashr i64 %xor, 1
+ %res = sub i64 %or, %shift
+ ret i64 %res
+}
+
+define i64 @test_ext_i64(i64 %a0, i64 %a1) nounwind {
+; RV32I-LABEL: test_ext_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a4, a1, a3
+; RV32I-NEXT: xor a1, a1, a3
+; RV32I-NEXT: srai a3, a1, 1
+; RV32I-NEXT: sub a4, a4, a3
+; RV32I-NEXT: slli a1, a1, 31
+; RV32I-NEXT: xor a3, a0, a2
+; RV32I-NEXT: srli a3, a3, 1
+; RV32I-NEXT: or a3, a3, a1
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: sltu a1, a0, a3
+; RV32I-NEXT: sub a1, a4, a1
+; RV32I-NEXT: sub a0, a0, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: or a2, a0, a1
+; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: sub a0, a2, a0
+; RV64I-NEXT: ret
+ %x0 = sext i64 %a0 to i128
+ %x1 = sext i64 %a1 to i128
+ %sum = add i128 %x0, %x1
+ %sum1 = add i128 %sum, 1
+ %shift = ashr i128 %sum1, 1
+ %res = trunc i128 %shift to i64
+ ret i64 %res
+}
diff --git a/llvm/test/CodeGen/RISCV/avgceilu.ll b/llvm/test/CodeGen/RISCV/avgceilu.ll
new file mode 100644
index 0000000000000..cc12b585036ab
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/avgceilu.ll
@@ -0,0 +1,239 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck -check-prefix=RV64I %s
+
+;
+; fixed avg(x,y) = sub(or(x,y),lshr(xor(x,y),1))
+;
+; ext avg(x,y) = trunc(lshr(add(zext(x),zext(y),1),1))
+;
+
+define i8 @test_fixed_i8(i8 %a0, i8 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: andi a1, a1, 255
+; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: addi a0, a0, 1
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a0, a0, 1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: ret
+ %or = or i8 %a0, %a1
+ %xor = xor i8 %a0, %a1
+ %shift = lshr i8 %xor, 1
+ %res = sub i8 %or, %shift
+ ret i8 %res
+}
+
+define i8 @test_ext_i8(i8 %a0, i8 %a1) nounwind {
+; RV32I-LABEL: test_ext_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: andi a1, a1, 255
+; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: addi a0, a0, 1
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a0, a0, 1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: ret
+ %x0 = zext i8 %a0 to i16
+ %x1 = zext i8 %a1 to i16
+ %sum = add i16 %x0, %x1
+ %sum1 = add i16 %sum, 1
+ %shift = lshr i16 %sum1, 1
+ %res = trunc i16 %shift to i8
+ ret i8 %res
+}
+
+define i16 @test_fixed_i16(i16 %a0, i16 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i16:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a2, 16
+; RV32I-NEXT: addi a2, a2, -1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: addi a0, a0, 1
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a2, 16
+; RV64I-NEXT: addiw a2, a2, -1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a0, a0, 1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: ret
+ %or = or i16 %a0, %a1
+ %xor = xor i16 %a0, %a1
+ %shift = lshr i16 %xor, 1
+ %res = sub i16 %or, %shift
+ ret i16 %res
+}
+
+define i16 @test_ext_i16(i16 %a0, i16 %a1) nounwind {
+; RV32I-LABEL: test_ext_i16:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a2, 16
+; RV32I-NEXT: addi a2, a2, -1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: addi a0, a0, 1
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a2, 16
+; RV64I-NEXT: addiw a2, a2, -1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a0, a0, 1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: ret
+ %x0 = zext i16 %a0 to i32
+ %x1 = zext i16 %a1 to i32
+ %sum = add i32 %x0, %x1
+ %sum1 = add i32 %sum, 1
+ %shift = lshr i32 %sum1, 1
+ %res = trunc i32 %shift to i16
+ ret i16 %res
+}
+
+define i32 @test_fixed_i32(i32 %a0, i32 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a2, a0, a1
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: sub a0, a2, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: srli a1, a1, 32
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a0, a0, 1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: ret
+ %or = or i32 %a0, %a1
+ %xor = xor i32 %a1, %a0
+ %shift = lshr i32 %xor, 1
+ %res = sub i32 %or, %shift
+ ret i32 %res
+}
+
+define i32 @test_ext_i32(i32 %a0, i32 %a1) nounwind {
+; RV32I-LABEL: test_ext_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a2, a0, a1
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: sub a0, a2, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: srli a1, a1, 32
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: addi a0, a0, 1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: ret
+ %x0 = zext i32 %a0 to i64
+ %x1 = zext i32 %a1 to i64
+ %sum = add i64 %x0, %x1
+ %sum1 = add i64 %sum, 1
+ %shift = lshr i64 %sum1, 1
+ %res = trunc i64 %shift to i32
+ ret i32 %res
+}
+
+define i64 @test_fixed_i64(i64 %a0, i64 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a4, a1, a3
+; RV32I-NEXT: xor a1, a1, a3
+; RV32I-NEXT: srli a3, a1, 1
+; RV32I-NEXT: sub a4, a4, a3
+; RV32I-NEXT: slli a1, a1, 31
+; RV32I-NEXT: xor a3, a0, a2
+; RV32I-NEXT: srli a3, a3, 1
+; RV32I-NEXT: or a3, a3, a1
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: sltu a1, a0, a3
+; RV32I-NEXT: sub a1, a4, a1
+; RV32I-NEXT: sub a0, a0, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: or a2, a0, a1
+; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: sub a0, a2, a0
+; RV64I-NEXT: ret
+ %or = or i64 %a0, %a1
+ %xor = xor i64 %a1, %a0
+ %shift = lshr i64 %xor, 1
+ %res = sub i64 %or, %shift
+ ret i64 %res
+}
+
+define i64 @test_ext_i64(i64 %a0, i64 %a1) nounwind {
+; RV32I-LABEL: test_ext_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: or a4, a1, a3
+; RV32I-NEXT: xor a1, a1, a3
+; RV32I-NEXT: srli a3, a1, 1
+; RV32I-NEXT: sub a4, a4, a3
+; RV32I-NEXT: slli a1, a1, 31
+; RV32I-NEXT: xor a3, a0, a2
+; RV32I-NEXT: srli a3, a3, 1
+; RV32I-NEXT: or a3, a3, a1
+; RV32I-NEXT: or a0, a0, a2
+; RV32I-NEXT: sltu a1, a0, a3
+; RV32I-NEXT: sub a1, a4, a1
+; RV32I-NEXT: sub a0, a0, a3
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: or a2, a0, a1
+; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: sub a0, a2, a0
+; RV64I-NEXT: ret
+ %x0 = zext i64 %a0 to i128
+ %x1 = zext i64 %a1 to i128
+ %sum = add i128 %x0, %x1
+ %sum1 = add i128 %sum, 1
+ %shift = lshr i128 %sum1, 1
+ %res = trunc i128 %shift to i64
+ ret i64 %res
+}
diff --git a/llvm/test/CodeGen/RISCV/avgfloors.ll b/llvm/test/CodeGen/RISCV/avgfloors.ll
new file mode 100644
index 0000000000000..b36177de021d1
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/avgfloors.ll
@@ -0,0 +1,229 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck -check-prefix=RV64I %s
+
+;
+; fixed avg(x,y) = add(and(x,y),ashr(xor(x,y),1))
+;
+; ext avg(x,y) = trunc(ashr(add(sext(x),sext(y)),1))
+;
+
+define i8 @test_fixed_i8(i8 %a0, i8 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: srai a1, a1, 24
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: srai a0, a0, 24
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srai a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 56
+; RV64I-NEXT: srai a1, a1, 56
+; RV64I-NEXT: slli a0, a0, 56
+; RV64I-NEXT: srai a0, a0, 56
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: ret
+ %and = and i8 %a0, %a1
+ %xor = xor i8 %a0, %a1
+ %shift = ashr i8 %xor, 1
+ %res = add i8 %and, %shift
+ ret i8 %res
+}
+
+define i8 @test_ext_i8(i8 %a0, i8 %a1) nounwind {
+; RV32I-LABEL: test_ext_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 24
+; RV32I-NEXT: srai a1, a1, 24
+; RV32I-NEXT: slli a0, a0, 24
+; RV32I-NEXT: srai a0, a0, 24
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srai a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 56
+; RV64I-NEXT: srai a1, a1, 56
+; RV64I-NEXT: slli a0, a0, 56
+; RV64I-NEXT: srai a0, a0, 56
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: ret
+ %x0 = sext i8 %a0 to i16
+ %x1 = sext i8 %a1 to i16
+ %sum = add i16 %x0, %x1
+ %shift = ashr i16 %sum, 1
+ %res = trunc i16 %shift to i8
+ ret i8 %res
+}
+
+define i16 @test_fixed_i16(i16 %a0, i16 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i16:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 16
+; RV32I-NEXT: srai a1, a1, 16
+; RV32I-NEXT: slli a0, a0, 16
+; RV32I-NEXT: srai a0, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srai a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 48
+; RV64I-NEXT: srai a1, a1, 48
+; RV64I-NEXT: slli a0, a0, 48
+; RV64I-NEXT: srai a0, a0, 48
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: ret
+ %and = and i16 %a0, %a1
+ %xor = xor i16 %a0, %a1
+ %shift = ashr i16 %xor, 1
+ %res = add i16 %and, %shift
+ ret i16 %res
+}
+
+define i16 @test_ext_i16(i16 %a0, i16 %a1) nounwind {
+; RV32I-LABEL: test_ext_i16:
+; RV32I: # %bb.0:
+; RV32I-NEXT: slli a1, a1, 16
+; RV32I-NEXT: srai a1, a1, 16
+; RV32I-NEXT: slli a0, a0, 16
+; RV32I-NEXT: srai a0, a0, 16
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srai a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 48
+; RV64I-NEXT: srai a1, a1, 48
+; RV64I-NEXT: slli a0, a0, 48
+; RV64I-NEXT: srai a0, a0, 48
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: ret
+ %x0 = sext i16 %a0 to i32
+ %x1 = sext i16 %a1 to i32
+ %sum = add i32 %x0, %x1
+ %shift = ashr i32 %sum, 1
+ %res = trunc i32 %shift to i16
+ ret i16 %res
+}
+
+define i32 @test_fixed_i32(i32 %a0, i32 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: srai a0, a0, 1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: sext.w a1, a1
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: ret
+ %and = and i32 %a0, %a1
+ %xor = xor i32 %a1, %a0
+ %shift = ashr i32 %xor, 1
+ %res = add i32 %and, %shift
+ ret i32 %res
+}
+
+define i32 @test_ext_i32(i32 %a0, i32 %a1) nounwind {
+; RV32I-LABEL: test_ext_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: srai a0, a0, 1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: sext.w a1, a1
+; RV64I-NEXT: sext.w a0, a0
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: ret
+ %x0 = sext i32 %a0 to i64
+ %x1 = sext i32 %a1 to i64
+ %sum = add i64 %x0, %x1
+ %shift = ashr i64 %sum, 1
+ %res = trunc i64 %shift to i32
+ ret i32 %res
+}
+
+define i64 @test_fixed_i64(i64 %a0, i64 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: and a4, a1, a3
+; RV32I-NEXT: xor a1, a1, a3
+; RV32I-NEXT: srai a3, a1, 1
+; RV32I-NEXT: add a3, a4, a3
+; RV32I-NEXT: slli a1, a1, 31
+; RV32I-NEXT: xor a4, a0, a2
+; RV32I-NEXT: srli a4, a4, 1
+; RV32I-NEXT: or a1, a4, a1
+; RV32I-NEXT: and a2, a0, a2
+; RV32I-NEXT: add a0, a2, a1
+; RV32I-NEXT: sltu a1, a0, a2
+; RV32I-NEXT: add a1, a3, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: and a2, a0, a1
+; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: add a0, a2, a0
+; RV64I-NEXT: ret
+ %and = and i64 %a0, %a1
+ %xor = xor i64 %a1, %a0
+ %shift = ashr i64 %xor, 1
+ %res = add i64 %and, %shift
+ ret i64 %res
+}
+
+define i64 @test_ext_i64(i64 %a0, i64 %a1) nounwind {
+; RV32I-LABEL: test_ext_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: and a4, a1, a3
+; RV32I-NEXT: xor a1, a1, a3
+; RV32I-NEXT: srai a3, a1, 1
+; RV32I-NEXT: add a3, a4, a3
+; RV32I-NEXT: slli a1, a1, 31
+; RV32I-NEXT: xor a4, a0, a2
+; RV32I-NEXT: srli a4, a4, 1
+; RV32I-NEXT: or a1, a4, a1
+; RV32I-NEXT: and a2, a0, a2
+; RV32I-NEXT: add a0, a2, a1
+; RV32I-NEXT: sltu a1, a0, a2
+; RV32I-NEXT: add a1, a3, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: and a2, a0, a1
+; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: srai a0, a0, 1
+; RV64I-NEXT: add a0, a2, a0
+; RV64I-NEXT: ret
+ %x0 = sext i64 %a0 to i128
+ %x1 = sext i64 %a1 to i128
+ %sum = add i128 %x0, %x1
+ %shift = ashr i128 %sum, 1
+ %res = trunc i128 %shift to i64
+ ret i64 %res
+}
diff --git a/llvm/test/CodeGen/RISCV/avgflooru.ll b/llvm/test/CodeGen/RISCV/avgflooru.ll
new file mode 100644
index 0000000000000..b58aaab6aaf4a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/avgflooru.ll
@@ -0,0 +1,225 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck -check-prefix=RV64I %s
+
+;
+; fixed avg(x,y) = add(and(x,y),lshr(xor(x,y),1))
+;
+; ext avg(x,y) = trunc(lshr(add(zext(x),zext(y)),1))
+;
+
+define i8 @test_fixed_i8(i8 %a0, i8 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: andi a1, a1, 255
+; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: ret
+ %and = and i8 %a0, %a1
+ %xor = xor i8 %a0, %a1
+ %shift = lshr i8 %xor, 1
+ %res = add i8 %and, %shift
+ ret i8 %res
+}
+
+define i8 @test_ext_i8(i8 %a0, i8 %a1) nounwind {
+; RV32I-LABEL: test_ext_i8:
+; RV32I: # %bb.0:
+; RV32I-NEXT: andi a1, a1, 255
+; RV32I-NEXT: andi a0, a0, 255
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i8:
+; RV64I: # %bb.0:
+; RV64I-NEXT: andi a1, a1, 255
+; RV64I-NEXT: andi a0, a0, 255
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: ret
+ %x0 = zext i8 %a0 to i16
+ %x1 = zext i8 %a1 to i16
+ %sum = add i16 %x0, %x1
+ %shift = lshr i16 %sum, 1
+ %res = trunc i16 %shift to i8
+ ret i8 %res
+}
+
+define i16 @test_fixed_i16(i16 %a0, i16 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i16:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a2, 16
+; RV32I-NEXT: addi a2, a2, -1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a2, 16
+; RV64I-NEXT: addiw a2, a2, -1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: ret
+ %and = and i16 %a0, %a1
+ %xor = xor i16 %a0, %a1
+ %shift = lshr i16 %xor, 1
+ %res = add i16 %and, %shift
+ ret i16 %res
+}
+
+define i16 @test_ext_i16(i16 %a0, i16 %a1) nounwind {
+; RV32I-LABEL: test_ext_i16:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a2, 16
+; RV32I-NEXT: addi a2, a2, -1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: and a0, a0, a2
+; RV32I-NEXT: add a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i16:
+; RV64I: # %bb.0:
+; RV64I-NEXT: lui a2, 16
+; RV64I-NEXT: addiw a2, a2, -1
+; RV64I-NEXT: and a1, a1, a2
+; RV64I-NEXT: and a0, a0, a2
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: ret
+ %x0 = zext i16 %a0 to i32
+ %x1 = zext i16 %a1 to i32
+ %sum = add i32 %x0, %x1
+ %shift = lshr i32 %sum, 1
+ %res = trunc i32 %shift to i16
+ ret i16 %res
+}
+
+define i32 @test_fixed_i32(i32 %a0, i32 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: srli a1, a1, 32
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: ret
+ %and = and i32 %a0, %a1
+ %xor = xor i32 %a1, %a0
+ %shift = lshr i32 %xor, 1
+ %res = add i32 %and, %shift
+ ret i32 %res
+}
+
+define i32 @test_ext_i32(i32 %a0, i32 %a1) nounwind {
+; RV32I-LABEL: test_ext_i32:
+; RV32I: # %bb.0:
+; RV32I-NEXT: and a2, a0, a1
+; RV32I-NEXT: xor a0, a0, a1
+; RV32I-NEXT: srli a0, a0, 1
+; RV32I-NEXT: add a0, a2, a0
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i32:
+; RV64I: # %bb.0:
+; RV64I-NEXT: slli a1, a1, 32
+; RV64I-NEXT: srli a1, a1, 32
+; RV64I-NEXT: slli a0, a0, 32
+; RV64I-NEXT: srli a0, a0, 32
+; RV64I-NEXT: add a0, a0, a1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: ret
+ %x0 = zext i32 %a0 to i64
+ %x1 = zext i32 %a1 to i64
+ %sum = add i64 %x0, %x1
+ %shift = lshr i64 %sum, 1
+ %res = trunc i64 %shift to i32
+ ret i32 %res
+}
+
+define i64 @test_fixed_i64(i64 %a0, i64 %a1) nounwind {
+; RV32I-LABEL: test_fixed_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: and a4, a1, a3
+; RV32I-NEXT: xor a1, a1, a3
+; RV32I-NEXT: srli a3, a1, 1
+; RV32I-NEXT: add a3, a4, a3
+; RV32I-NEXT: slli a1, a1, 31
+; RV32I-NEXT: xor a4, a0, a2
+; RV32I-NEXT: srli a4, a4, 1
+; RV32I-NEXT: or a1, a4, a1
+; RV32I-NEXT: and a2, a0, a2
+; RV32I-NEXT: add a0, a2, a1
+; RV32I-NEXT: sltu a1, a0, a2
+; RV32I-NEXT: add a1, a3, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_fixed_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: and a2, a0, a1
+; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: add a0, a2, a0
+; RV64I-NEXT: ret
+ %and = and i64 %a0, %a1
+ %xor = xor i64 %a1, %a0
+ %shift = lshr i64 %xor, 1
+ %res = add i64 %and, %shift
+ ret i64 %res
+}
+
+define i64 @test_ext_i64(i64 %a0, i64 %a1) nounwind {
+; RV32I-LABEL: test_ext_i64:
+; RV32I: # %bb.0:
+; RV32I-NEXT: and a4, a1, a3
+; RV32I-NEXT: xor a1, a1, a3
+; RV32I-NEXT: srli a3, a1, 1
+; RV32I-NEXT: add a3, a4, a3
+; RV32I-NEXT: slli a1, a1, 31
+; RV32I-NEXT: xor a4, a0, a2
+; RV32I-NEXT: srli a4, a4, 1
+; RV32I-NEXT: or a1, a4, a1
+; RV32I-NEXT: and a2, a0, a2
+; RV32I-NEXT: add a0, a2, a1
+; RV32I-NEXT: sltu a1, a0, a2
+; RV32I-NEXT: add a1, a3, a1
+; RV32I-NEXT: ret
+;
+; RV64I-LABEL: test_ext_i64:
+; RV64I: # %bb.0:
+; RV64I-NEXT: and a2, a0, a1
+; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: srli a0, a0, 1
+; RV64I-NEXT: add a0, a2, a0
+; RV64I-NEXT: ret
+ %x0 = zext i64 %a0 to i128
+ %x1 = zext i64 %a1 to i128
+ %sum = add i128 %x0, %x1
+ %shift = lshr i128 %sum, 1
+ %res = trunc i128 %shift to i64
+ ret i64 %res
+}
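
A quick exhaustive sanity check for the 8-bit case (again illustrative, not part of the commit; it assumes the avgfloor*/avgceil* helpers from the sketch near the top of this mail are in scope, with the same arithmetic-shift assumption) comparing the bit-trick forms against the widened references used by the test_ext_* functions:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
      for (int i = 0; i < 256; ++i) {
        for (int j = 0; j < 256; ++j) {
          uint8_t ux = (uint8_t)i, uy = (uint8_t)j;
          int8_t sx = (int8_t)i, sy = (int8_t)j;
          /* Widened references: extend, add (+1 for ceiling), shift, truncate. */
          assert(avgflooru8(ux, uy) == (uint8_t)((i + j) >> 1));
          assert(avgceilu8(ux, uy) == (uint8_t)((i + j + 1) >> 1));
          assert(avgfloors8(sx, sy) == (int8_t)((sx + sy) >> 1));
          assert(avgceils8(sx, sy) == (int8_t)((sx + sy + 1) >> 1));
        }
      }
      return 0;
    }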