Conversation

@wizardengineer (Contributor)

Add comprehensive test suite for RISC-V fallback implementation:

  • Edge cases (zero conditions, large integers, sign extension)
  • Pattern matching (nested selects, chains)
  • Vector support with RVV extensions
  • Side effects and memory operations

The basic fallback test is in the core infrastructure PR.
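
For context, the sketch below (not part of this patch; the function name is illustrative only) shows the IR shape the categories above exercise: a call to the ct.select intrinsic, which the fallback lowering expands into the branchless mask arithmetic visible in the autogenerated check lines, roughly mask = 0 - cond and result = (a & mask) | (b & ~mask).

; Hypothetical example; the intrinsic declaration matches the ones used in the tests below.
declare i32 @llvm.ct.select.i32(i1, i32, i32)

define i32 @sketch_ct_select(i1 %cond, i32 %a, i32 %b) {
  ; Selects %a when %cond is true and %b otherwise, without a data-dependent branch.
  %result = call i32 @llvm.ct.select.i32(i1 %cond, i32 %a, i32 %b)
  ret i32 %result
}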

wizardengineer (Contributor, Author) commented Nov 6, 2025

Warning

This pull request is not mergeable via GitHub because a downstack PR is open. Once all requirements are satisfied, merge this PR as a stack on Graphite.

This stack of pull requests is managed by Graphite.

wizardengineer force-pushed the users/wizardengineer/ct-select-riscv branch from 05465f4 to 9ed3c7d on November 6, 2025 at 17:28
wizardengineer force-pushed the users/wizardengineer/ct-select-clang branch from cbb5490 to 6ac8221 on November 6, 2025 at 17:28

llvmbot (Member) commented Nov 6, 2025

@llvm/pr-subscribers-backend-risc-v

Author: Julius Alexandre (wizardengineer)

Changes

Add comprehensive test suite for RISC-V fallback implementation:

  • Edge cases (zero conditions, large integers, sign extension)
  • Pattern matching (nested selects, chains)
  • Vector support with RVV extensions
  • Side effects and memory operations

The basic fallback test is in the core infrastructure PR.


Patch is 54.38 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/166708.diff

4 Files Affected:

  • (added) llvm/test/CodeGen/RISCV/ctselect-fallback-edge-cases.ll (+214)
  • (added) llvm/test/CodeGen/RISCV/ctselect-fallback-patterns.ll (+383)
  • (added) llvm/test/CodeGen/RISCV/ctselect-fallback-vector-rvv.ll (+804)
  • (added) llvm/test/CodeGen/RISCV/ctselect-side-effects.ll (+176)
diff --git a/llvm/test/CodeGen/RISCV/ctselect-fallback-edge-cases.ll b/llvm/test/CodeGen/RISCV/ctselect-fallback-edge-cases.ll
new file mode 100644
index 0000000000000..af1be0c8f3ddc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/ctselect-fallback-edge-cases.ll
@@ -0,0 +1,214 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=riscv64 -O3 | FileCheck %s --check-prefix=RV64
+; RUN: llc < %s -mtriple=riscv32 -O3 | FileCheck %s --check-prefix=RV32
+
+; Test with small integer types
+define i1 @test_ctselect_i1(i1 %cond, i1 %a, i1 %b) {
+; RV64-LABEL: test_ctselect_i1:
+; RV64:       # %bb.0:
+; RV64-NEXT:    and a1, a0, a1
+; RV64-NEXT:    xori a0, a0, 1
+; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_i1:
+; RV32:       # %bb.0:
+; RV32-NEXT:    and a1, a0, a1
+; RV32-NEXT:    xori a0, a0, 1
+; RV32-NEXT:    and a0, a0, a2
+; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    ret
+  %result = call i1 @llvm.ct.select.i1(i1 %cond, i1 %a, i1 %b)
+  ret i1 %result
+}
+
+; Test with extremal values
+define i32 @test_ctselect_extremal_values(i1 %cond) {
+; RV64-LABEL: test_ctselect_extremal_values:
+; RV64:       # %bb.0:
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    lui a1, 524288
+; RV64-NEXT:    subw a0, a1, a0
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_extremal_values:
+; RV32:       # %bb.0:
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    lui a1, 524288
+; RV32-NEXT:    addi a2, a0, -1
+; RV32-NEXT:    neg a0, a0
+; RV32-NEXT:    and a1, a2, a1
+; RV32-NEXT:    slli a0, a0, 1
+; RV32-NEXT:    srli a0, a0, 1
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    ret
+  %result = call i32 @llvm.ct.select.i32(i1 %cond, i32 2147483647, i32 -2147483648)
+  ret i32 %result
+}
+
+; Test with null pointers
+define ptr @test_ctselect_null_ptr(i1 %cond, ptr %ptr) {
+; RV64-LABEL: test_ctselect_null_ptr:
+; RV64:       # %bb.0:
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    and a0, a0, a1
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_null_ptr:
+; RV32:       # %bb.0:
+; RV32-NEXT:    slli a0, a0, 31
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    and a0, a0, a1
+; RV32-NEXT:    ret
+  %result = call ptr @llvm.ct.select.p0(i1 %cond, ptr %ptr, ptr null)
+  ret ptr %result
+}
+
+; Test with function pointers
+define ptr @test_ctselect_function_ptr(i1 %cond, ptr %func1, ptr %func2) {
+; RV64-LABEL: test_ctselect_function_ptr:
+; RV64:       # %bb.0:
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    neg a3, a0
+; RV64-NEXT:    addi a0, a0, -1
+; RV64-NEXT:    and a1, a3, a1
+; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_function_ptr:
+; RV32:       # %bb.0:
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a3, a0
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    and a1, a3, a1
+; RV32-NEXT:    and a0, a0, a2
+; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    ret
+  %result = call ptr @llvm.ct.select.p0(i1 %cond, ptr %func1, ptr %func2)
+  ret ptr %result
+}
+
+; Test with condition from icmp on pointers
+define ptr @test_ctselect_ptr_cmp(ptr %p1, ptr %p2, ptr %a, ptr %b) {
+; RV64-LABEL: test_ctselect_ptr_cmp:
+; RV64:       # %bb.0:
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    addi a0, a0, -1
+; RV64-NEXT:    and a2, a0, a2
+; RV64-NEXT:    not a0, a0
+; RV64-NEXT:    and a0, a0, a3
+; RV64-NEXT:    or a0, a2, a0
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_ptr_cmp:
+; RV32:       # %bb.0:
+; RV32-NEXT:    xor a0, a0, a1
+; RV32-NEXT:    snez a0, a0
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    and a2, a0, a2
+; RV32-NEXT:    not a0, a0
+; RV32-NEXT:    and a0, a0, a3
+; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    ret
+  %cmp = icmp eq ptr %p1, %p2
+  %result = call ptr @llvm.ct.select.p0(i1 %cmp, ptr %a, ptr %b)
+  ret ptr %result
+}
+
+; Test with struct pointer types
+%struct.pair = type { i32, i32 }
+
+define ptr @test_ctselect_struct_ptr(i1 %cond, ptr %a, ptr %b) {
+; RV64-LABEL: test_ctselect_struct_ptr:
+; RV64:       # %bb.0:
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    neg a3, a0
+; RV64-NEXT:    addi a0, a0, -1
+; RV64-NEXT:    and a1, a3, a1
+; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    or a0, a1, a0
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_struct_ptr:
+; RV32:       # %bb.0:
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a3, a0
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    and a1, a3, a1
+; RV32-NEXT:    and a0, a0, a2
+; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    ret
+  %result = call ptr @llvm.ct.select.p0(i1 %cond, ptr %a, ptr %b)
+  ret ptr %result
+}
+
+; Test with deeply nested conditions
+define i32 @test_ctselect_deeply_nested(i1 %c1, i1 %c2, i1 %c3, i1 %c4, i32 %a, i32 %b, i32 %c, i32 %d, i32 %e) {
+; RV64-LABEL: test_ctselect_deeply_nested:
+; RV64:       # %bb.0:
+; RV64-NEXT:    lw t0, 0(sp)
+; RV64-NEXT:    xor a4, a4, a5
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    xor a5, a5, a6
+; RV64-NEXT:    slli a1, a1, 63
+; RV64-NEXT:    xor a6, a6, a7
+; RV64-NEXT:    slli a2, a2, 63
+; RV64-NEXT:    slli a3, a3, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    srai a1, a1, 63
+; RV64-NEXT:    srai a2, a2, 63
+; RV64-NEXT:    and a0, a4, a0
+; RV64-NEXT:    xor a0, a0, a5
+; RV64-NEXT:    and a0, a0, a1
+; RV64-NEXT:    xor a1, a7, t0
+; RV64-NEXT:    xor a0, a0, a6
+; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    srai a3, a3, 63
+; RV64-NEXT:    and a0, a0, a3
+; RV64-NEXT:    xor a0, a0, t0
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_deeply_nested:
+; RV32:       # %bb.0:
+; RV32-NEXT:    lw t0, 0(sp)
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    andi a1, a1, 1
+; RV32-NEXT:    andi a2, a2, 1
+; RV32-NEXT:    andi a3, a3, 1
+; RV32-NEXT:    neg t1, a0
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    and a4, t1, a4
+; RV32-NEXT:    neg t1, a1
+; RV32-NEXT:    addi a1, a1, -1
+; RV32-NEXT:    and a0, a0, a5
+; RV32-NEXT:    neg a5, a2
+; RV32-NEXT:    addi a2, a2, -1
+; RV32-NEXT:    and a1, a1, a6
+; RV32-NEXT:    neg a6, a3
+; RV32-NEXT:    addi a3, a3, -1
+; RV32-NEXT:    and a2, a2, a7
+; RV32-NEXT:    or a0, a4, a0
+; RV32-NEXT:    and a0, t1, a0
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    and a0, a5, a0
+; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    and a0, a6, a0
+; RV32-NEXT:    and a1, a3, t0
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    ret
+  %sel1 = call i32 @llvm.ct.select.i32(i1 %c1, i32 %a, i32 %b)
+  %sel2 = call i32 @llvm.ct.select.i32(i1 %c2, i32 %sel1, i32 %c)
+  %sel3 = call i32 @llvm.ct.select.i32(i1 %c3, i32 %sel2, i32 %d)
+  %sel4 = call i32 @llvm.ct.select.i32(i1 %c4, i32 %sel3, i32 %e)
+  ret i32 %sel4
+}
+
+; Declare the intrinsics
+declare i1 @llvm.ct.select.i1(i1, i1, i1)
+declare i32 @llvm.ct.select.i32(i1, i32, i32)
+declare ptr @llvm.ct.select.p0(i1, ptr, ptr)
diff --git a/llvm/test/CodeGen/RISCV/ctselect-fallback-patterns.ll b/llvm/test/CodeGen/RISCV/ctselect-fallback-patterns.ll
new file mode 100644
index 0000000000000..1149971fd090e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/ctselect-fallback-patterns.ll
@@ -0,0 +1,383 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=riscv64 -O3 | FileCheck %s --check-prefix=RV64
+; RUN: llc < %s -mtriple=riscv32 -O3 | FileCheck %s --check-prefix=RV32
+
+; Test smin(x, 0) pattern
+define i32 @test_ctselect_smin_zero(i32 %x) {
+; RV64-LABEL: test_ctselect_smin_zero:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sraiw a1, a0, 31
+; RV64-NEXT:    and a0, a1, a0
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_smin_zero:
+; RV32:       # %bb.0:
+; RV32-NEXT:    srai a1, a0, 31
+; RV32-NEXT:    and a0, a1, a0
+; RV32-NEXT:    ret
+  %cmp = icmp slt i32 %x, 0
+  %result = call i32 @llvm.ct.select.i32(i1 %cmp, i32 %x, i32 0)
+  ret i32 %result
+}
+
+; Test smax(x, 0) pattern
+define i32 @test_ctselect_smax_zero(i32 %x) {
+; RV64-LABEL: test_ctselect_smax_zero:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sext.w a1, a0
+; RV64-NEXT:    sgtz a1, a1
+; RV64-NEXT:    neg a1, a1
+; RV64-NEXT:    and a0, a1, a0
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_smax_zero:
+; RV32:       # %bb.0:
+; RV32-NEXT:    sgtz a1, a0
+; RV32-NEXT:    neg a1, a1
+; RV32-NEXT:    and a0, a1, a0
+; RV32-NEXT:    ret
+  %cmp = icmp sgt i32 %x, 0
+  %result = call i32 @llvm.ct.select.i32(i1 %cmp, i32 %x, i32 0)
+  ret i32 %result
+}
+
+; Test generic smin pattern
+define i32 @test_ctselect_smin_generic(i32 %x, i32 %y) {
+; RV64-LABEL: test_ctselect_smin_generic:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sext.w a2, a1
+; RV64-NEXT:    sext.w a3, a0
+; RV64-NEXT:    slt a2, a3, a2
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    neg a2, a2
+; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_smin_generic:
+; RV32:       # %bb.0:
+; RV32-NEXT:    slt a2, a0, a1
+; RV32-NEXT:    neg a3, a2
+; RV32-NEXT:    addi a2, a2, -1
+; RV32-NEXT:    and a0, a3, a0
+; RV32-NEXT:    and a1, a2, a1
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    ret
+  %cmp = icmp slt i32 %x, %y
+  %result = call i32 @llvm.ct.select.i32(i1 %cmp, i32 %x, i32 %y)
+  ret i32 %result
+}
+
+; Test generic smax pattern
+define i32 @test_ctselect_smax_generic(i32 %x, i32 %y) {
+; RV64-LABEL: test_ctselect_smax_generic:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sext.w a2, a0
+; RV64-NEXT:    sext.w a3, a1
+; RV64-NEXT:    slt a2, a3, a2
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    neg a2, a2
+; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_smax_generic:
+; RV32:       # %bb.0:
+; RV32-NEXT:    slt a2, a1, a0
+; RV32-NEXT:    neg a3, a2
+; RV32-NEXT:    addi a2, a2, -1
+; RV32-NEXT:    and a0, a3, a0
+; RV32-NEXT:    and a1, a2, a1
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    ret
+  %cmp = icmp sgt i32 %x, %y
+  %result = call i32 @llvm.ct.select.i32(i1 %cmp, i32 %x, i32 %y)
+  ret i32 %result
+}
+
+; Test umin pattern
+define i32 @test_ctselect_umin_generic(i32 %x, i32 %y) {
+; RV64-LABEL: test_ctselect_umin_generic:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sext.w a2, a1
+; RV64-NEXT:    sext.w a3, a0
+; RV64-NEXT:    sltu a2, a3, a2
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    neg a2, a2
+; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_umin_generic:
+; RV32:       # %bb.0:
+; RV32-NEXT:    sltu a2, a0, a1
+; RV32-NEXT:    neg a3, a2
+; RV32-NEXT:    addi a2, a2, -1
+; RV32-NEXT:    and a0, a3, a0
+; RV32-NEXT:    and a1, a2, a1
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    ret
+  %cmp = icmp ult i32 %x, %y
+  %result = call i32 @llvm.ct.select.i32(i1 %cmp, i32 %x, i32 %y)
+  ret i32 %result
+}
+
+; Test umax pattern
+define i32 @test_ctselect_umax_generic(i32 %x, i32 %y) {
+; RV64-LABEL: test_ctselect_umax_generic:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sext.w a2, a0
+; RV64-NEXT:    sext.w a3, a1
+; RV64-NEXT:    sltu a2, a3, a2
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    neg a2, a2
+; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_umax_generic:
+; RV32:       # %bb.0:
+; RV32-NEXT:    sltu a2, a1, a0
+; RV32-NEXT:    neg a3, a2
+; RV32-NEXT:    addi a2, a2, -1
+; RV32-NEXT:    and a0, a3, a0
+; RV32-NEXT:    and a1, a2, a1
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    ret
+  %cmp = icmp ugt i32 %x, %y
+  %result = call i32 @llvm.ct.select.i32(i1 %cmp, i32 %x, i32 %y)
+  ret i32 %result
+}
+
+; Test abs pattern
+define i32 @test_ctselect_abs(i32 %x) {
+; RV64-LABEL: test_ctselect_abs:
+; RV64:       # %bb.0:
+; RV64-NEXT:    negw a1, a0
+; RV64-NEXT:    xor a1, a1, a0
+; RV64-NEXT:    sraiw a2, a0, 31
+; RV64-NEXT:    and a1, a1, a2
+; RV64-NEXT:    xor a0, a1, a0
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_abs:
+; RV32:       # %bb.0:
+; RV32-NEXT:    neg a1, a0
+; RV32-NEXT:    srai a2, a0, 31
+; RV32-NEXT:    and a1, a2, a1
+; RV32-NEXT:    not a2, a2
+; RV32-NEXT:    and a0, a2, a0
+; RV32-NEXT:    or a0, a1, a0
+; RV32-NEXT:    ret
+  %neg = sub i32 0, %x
+  %cmp = icmp slt i32 %x, 0
+  %result = call i32 @llvm.ct.select.i32(i1 %cmp, i32 %neg, i32 %x)
+  ret i32 %result
+}
+
+; Test nabs pattern (negative abs)
+define i32 @test_ctselect_nabs(i32 %x) {
+; RV64-LABEL: test_ctselect_nabs:
+; RV64:       # %bb.0:
+; RV64-NEXT:    negw a1, a0
+; RV64-NEXT:    xor a2, a0, a1
+; RV64-NEXT:    sraiw a0, a0, 31
+; RV64-NEXT:    and a0, a2, a0
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_nabs:
+; RV32:       # %bb.0:
+; RV32-NEXT:    neg a1, a0
+; RV32-NEXT:    srai a2, a0, 31
+; RV32-NEXT:    and a0, a2, a0
+; RV32-NEXT:    not a2, a2
+; RV32-NEXT:    and a1, a2, a1
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    ret
+  %neg = sub i32 0, %x
+  %cmp = icmp slt i32 %x, 0
+  %result = call i32 @llvm.ct.select.i32(i1 %cmp, i32 %x, i32 %neg)
+  ret i32 %result
+}
+
+; Test sign extension pattern
+define i32 @test_ctselect_sign_extend(i32 %x) {
+; RV64-LABEL: test_ctselect_sign_extend:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sraiw a0, a0, 31
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_sign_extend:
+; RV32:       # %bb.0:
+; RV32-NEXT:    srai a0, a0, 31
+; RV32-NEXT:    ret
+  %cmp = icmp slt i32 %x, 0
+  %result = call i32 @llvm.ct.select.i32(i1 %cmp, i32 -1, i32 0)
+  ret i32 %result
+}
+
+; Test zero extension pattern
+define i32 @test_ctselect_zero_extend(i32 %x) {
+; RV64-LABEL: test_ctselect_zero_extend:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    snez a0, a0
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_zero_extend:
+; RV32:       # %bb.0:
+; RV32-NEXT:    snez a0, a0
+; RV32-NEXT:    ret
+  %cmp = icmp ne i32 %x, 0
+  %result = call i32 @llvm.ct.select.i32(i1 %cmp, i32 1, i32 0)
+  ret i32 %result
+}
+
+; Test constant folding with known condition
+define i32 @test_ctselect_constant_folding_true(i32 %a, i32 %b) {
+; RV64-LABEL: test_ctselect_constant_folding_true:
+; RV64:       # %bb.0:
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_constant_folding_true:
+; RV32:       # %bb.0:
+; RV32-NEXT:    ret
+  %result = call i32 @llvm.ct.select.i32(i1 true, i32 %a, i32 %b)
+  ret i32 %result
+}
+
+define i32 @test_ctselect_constant_folding_false(i32 %a, i32 %b) {
+; RV64-LABEL: test_ctselect_constant_folding_false:
+; RV64:       # %bb.0:
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_constant_folding_false:
+; RV32:       # %bb.0:
+; RV32-NEXT:    mv a0, a1
+; RV32-NEXT:    ret
+  %result = call i32 @llvm.ct.select.i32(i1 false, i32 %a, i32 %b)
+  ret i32 %result
+}
+
+; Test with identical operands
+define i32 @test_ctselect_identical_operands(i1 %cond, i32 %x) {
+; RV64-LABEL: test_ctselect_identical_operands:
+; RV64:       # %bb.0:
+; RV64-NEXT:    mv a0, a1
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_identical_operands:
+; RV32:       # %bb.0:
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    neg a2, a0
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    and a2, a2, a1
+; RV32-NEXT:    and a0, a0, a1
+; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    ret
+  %result = call i32 @llvm.ct.select.i32(i1 %cond, i32 %x, i32 %x)
+  ret i32 %result
+}
+
+; Test with inverted condition
+define i32 @test_ctselect_inverted_condition(i32 %x, i32 %y, i32 %a, i32 %b) {
+; RV64-LABEL: test_ctselect_inverted_condition:
+; RV64:       # %bb.0:
+; RV64-NEXT:    sext.w a1, a1
+; RV64-NEXT:    sext.w a0, a0
+; RV64-NEXT:    xor a0, a0, a1
+; RV64-NEXT:    seqz a0, a0
+; RV64-NEXT:    xor a2, a2, a3
+; RV64-NEXT:    addi a0, a0, -1
+; RV64-NEXT:    and a0, a2, a0
+; RV64-NEXT:    xor a0, a0, a3
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_inverted_condition:
+; RV32:       # %bb.0:
+; RV32-NEXT:    xor a0, a0, a1
+; RV32-NEXT:    seqz a0, a0
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    and a2, a0, a2
+; RV32-NEXT:    not a0, a0
+; RV32-NEXT:    and a0, a0, a3
+; RV32-NEXT:    or a0, a2, a0
+; RV32-NEXT:    ret
+  %cmp = icmp eq i32 %x, %y
+  %not_cmp = xor i1 %cmp, true
+  %result = call i32 @llvm.ct.select.i32(i1 %not_cmp, i32 %a, i32 %b)
+  ret i32 %result
+}
+
+; Test chain of ct.select operations
+define i32 @test_ctselect_chain(i1 %c1, i1 %c2, i1 %c3, i32 %a, i32 %b, i32 %c, i32 %d) {
+; RV64-LABEL: test_ctselect_chain:
+; RV64:       # %bb.0:
+; RV64-NEXT:    xor a3, a3, a4
+; RV64-NEXT:    slli a0, a0, 63
+; RV64-NEXT:    xor a4, a4, a5
+; RV64-NEXT:    slli a1, a1, 63
+; RV64-NEXT:    xor a5, a5, a6
+; RV64-NEXT:    slli a2, a2, 63
+; RV64-NEXT:    srai a0, a0, 63
+; RV64-NEXT:    srai a1, a1, 63
+; RV64-NEXT:    and a0, a3, a0
+; RV64-NEXT:    xor a0, a0, a4
+; RV64-NEXT:    and a0, a0, a1
+; RV64-NEXT:    xor a0, a0, a5
+; RV64-NEXT:    srai a2, a2, 63
+; RV64-NEXT:    and a0, a0, a2
+; RV64-NEXT:    xor a0, a0, a6
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_chain:
+; RV32:       # %bb.0:
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    andi a1, a1, 1
+; RV32-NEXT:    andi a2, a2, 1
+; RV32-NEXT:    neg a7, a0
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    and a3, a7, a3
+; RV32-NEXT:    neg a7, a1
+; RV32-NEXT:    addi a1, a1, -1
+; RV32-NEXT:    and a0, a0, a4
+; RV32-NEXT:    neg a4, a2
+; RV32-NEXT:    addi a2, a2, -1
+; RV32-NEXT:    and a1, a1, a5
+; RV32-NEXT:    or a0, a3, a0
+; RV32-NEXT:    and a0, a7, a0
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    and a0, a4, a0
+; RV32-NEXT:    and a1, a2, a6
+; RV32-NEXT:    or a0, a0, a1
+; RV32-NEXT:    ret
+  %sel1 = call i32 @llvm.ct.select.i32(i1 %c1, i32 %a, i32 %b)
+  %sel2 = call i32 @llvm.ct.select.i32(i1 %c2, i32 %sel1, i32 %c)
+  %sel3 = call i32 @llvm.ct.select.i32(i1 %c3, i32 %sel2, i32 %d)
+  ret i32 %sel3
+}
+
+; Test for 64-bit operations (supported on all 64-bit architectures)
+define i64 @test_ctselect_i64_smin_zero(i64 %x) {
+; RV64-LABEL: test_ctselect_i64_smin_zero:
+; RV64:       # %bb.0:
+; RV64-NEXT:    srai a1, a0, 63
+; RV64-NEXT:    and a0, a1, a0
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: test_ctselect_i64_smin_zero:
+; RV32:       # %bb.0:
+; RV32-NEXT:    srai a2, a1, 31
+; RV32-NEXT:    and a0, a2, a0
+; RV32-NEXT:    and a1, a2, a1
+; RV32-NEXT:    ret
+  %cmp = icmp slt i64 %x, 0
+  %result = call i64 @llvm.ct.select.i64(i1 %cmp, i64 %x, i64 0)
+  ret i64 %result
+}
+
+; Declare the intrinsics
+declare i32 @llvm.ct.select.i32(i1, i32, i32)
+declare i64 @llvm.ct.select.i64(i1, i64, i64)
diff --git a/llvm/test/CodeGen/RISCV/ctselect-fallback-vector-rvv.ll b/llvm/test/CodeGen/RISCV/ctselect-fallback-vector-rvv.ll
new file mode 100644
index 0000000000000..a02e1e4749443
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/ctselect-fallback-vector-rvv.ll
@@ -0,0 +1,804 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v        -O3 | FileCheck %s --check-prefix=RV64
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v        -O3 | FileCheck %s --check-prefix=RV32
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvl128b -O3 | FileCheck %s --check-prefix=RV32-V128
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvl256b -O3 | FileCheck %s --check-prefix=RV64-V256
+
+
+; Basic pass-through select on nxv4i32
+define <vscale x 4 x i32> @ctsel_nxv4i32_basic(i1 %cond, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; RV64-LABEL: ctsel_nxv4i32_basic:
+; RV64:       # %bb.0:
+; RV64-NEXT:    andi a0, a0, 1
+; RV64-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; RV64-NEXT:    vmv.v.x v12, a0
+; RV64-NEXT:    vmsne.vi v0, v12, 0
+; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT:    vmv.v.i v12, 0
+; RV64-NEXT:    vmerge.vim v12, v12, -1, v0
+; RV64-NEXT:    vand.vv v8, v12, v8
+; RV64-NEXT:    vnot.v v12, v12
+; RV64-NEXT:    vand.vv v10, v12, v10
+; RV64-NEXT:    vor.vv v8, v8, v10
+; RV64-NEXT:    ret
+;
+; RV32-LABEL: ctsel_nxv4i32_basic:
+; RV32:       # %bb.0:
+; RV32-NEXT:    andi a0, a0, 1
+; RV32-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
+; RV32-NEXT:    vmv.v.x v12, a0
+; RV32-NEXT:    vmsne.vi v0, v12, 0
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT:    vmv.v.i v12, 0
+; RV32-NEXT:    vmerge.vim v12, v12, -1, v0
+; RV32-N...
[truncated]

